diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000000..ca7369315f9e --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,14 @@ +{ + "image": "grafana/loki-build-image:0.28.1", + "containerEnv": { + "BUILD_IN_CONTAINER": "false" + }, + "customizations": { + "vscode": { + "settings": {}, + "extensions": ["golang.go", "Grafana.vscode-jsonnet"] + } + }, + "features": { + } +} diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 906206e3025a..e3bba868dabf 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -3,6 +3,8 @@ local archs = ['amd64', 'arm64', 'arm']; local build_image_version = std.extVar('__build-image-version'); +local drone_updater_plugin_image = 'us.gcr.io/kubernetes-dev/drone/plugins/updater@sha256:cbcb09c74f96a34c528f52bf9b4815a036b11fed65f685be216e0c8b8e84285b'; + local onPRs = { event: ['pull_request'], }; @@ -11,6 +13,10 @@ local onTagOrMain = { event: ['push', 'tag'], }; +local onTag = { + event: ['tag'], +}; + local onPath(path) = { paths+: [path], }; @@ -44,6 +50,7 @@ local gpg_private_key = secret('gpg_private_key', 'infra/data/ci/packages-publis // Injected in a secret because this is a public repository and having the config here would leak our environment names local updater_config_template = secret('updater_config_template', 'secret/data/common/loki_ci_autodeploy', 'updater-config-template.json'); +local helm_chart_auto_update_config_template = secret('helm-chart-update-config-template', 'secret/data/common/loki-helm-chart-auto-update', 'on-loki-release-config.json'); local run(name, commands, env={}) = { name: name, @@ -446,7 +453,7 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') { [ pipeline('loki-build-image') { - local build_image_tag = '0.27.1', + local build_image_tag = '0.28.1', workspace: { base: '/src', path: 'loki', @@ -665,7 +672,7 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') { }, { name: 'trigger', - image: 'us.gcr.io/kubernetes-dev/drone/plugins/updater', + image: drone_updater_plugin_image, settings: { github_token: { from_secret: github_secret.name }, config_file: configFileName, @@ -674,6 +681,56 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') { }, ], }, + pipeline('update-loki-helm-chart-on-loki-release') { + local configFileName = 'updater-config.json', + depends_on: ['manifest'], + image_pull_secrets: [pull_secret.name], + trigger: { + // we need to run it only on Loki tags that start with `v`. + ref: ['refs/tags/v*'], + }, + steps: [ + { + name: 'check-version-is-latest', + image: 'alpine', + when: onTag, + commands: [ + 'apk add --no-cache bash git', + 'git fetch --tags', + "latest_version=$(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' | sort -V | tail -n 1 | sed 's/v//g')", + 'RELEASE_TAG=$(./tools/image-tag)', + 'if [ "$RELEASE_TAG" != "$latest_version" ]; then echo "Current version $RELEASE_TAG is not the latest version of Loki. 
The latest version is $latest_version" && exit 78; fi', + ], + }, + { + name: 'prepare-helm-chart-update-config', + image: 'alpine', + depends_on: ['check-version-is-latest'], + commands: [ + 'apk add --no-cache bash git', + 'git fetch origin --tags', + 'RELEASE_TAG=$(./tools/image-tag)', + 'echo $PLUGIN_CONFIG_TEMPLATE > %s' % configFileName, + // replace placeholders with RELEASE TAG + 'sed -i -E "s/\\{\\{release\\}\\}/$RELEASE_TAG/g" %s' % configFileName, + ], + settings: { + config_template: { from_secret: helm_chart_auto_update_config_template.name }, + }, + }, + { + name: 'trigger-helm-chart-update', + image: drone_updater_plugin_image, + settings: { + github_token: { + from_secret: github_secret.name, + }, + config_file: configFileName, + }, + depends_on: ['prepare-helm-chart-update-config'], + }, + ], + }, promtail_win(), logql_analyzer(), pipeline('release') { @@ -818,6 +875,7 @@ local manifest_ecr(apps, archs) = pipeline('manifest-ecr') { ecr_key, ecr_secret_key, updater_config_template, + helm_chart_auto_update_config_template, gpg_passphrase, gpg_private_key, ] diff --git a/.drone/drone.yml b/.drone/drone.yml index 8a4112706fcd..8dadc0aff5d4 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -10,7 +10,7 @@ steps: dry_run: true repo: grafana/loki-build-image tags: - - 0.27.1 + - 0.28.1 when: event: - pull_request @@ -26,7 +26,7 @@ steps: from_secret: docker_password repo: grafana/loki-build-image tags: - - 0.27.1 + - 0.28.1 username: from_secret: docker_username when: @@ -93,14 +93,14 @@ steps: depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: check-drone-drift - commands: - make BUILD_IN_CONTAINER=false check-generated-files depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: check-generated-files - commands: - cd .. 
@@ -110,7 +110,7 @@ steps: depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: clone-target-branch when: event: @@ -121,7 +121,7 @@ steps: - clone-target-branch - check-generated-files environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: test - commands: - cd ../loki-target-branch @@ -129,7 +129,7 @@ steps: depends_on: - clone-target-branch environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: test-target-branch when: event: @@ -142,7 +142,7 @@ steps: - test - test-target-branch environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: compare-coverage when: event: @@ -160,7 +160,7 @@ steps: TOKEN: from_secret: github_token USER: grafanabot - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: report-coverage when: event: @@ -170,7 +170,7 @@ steps: depends_on: - check-generated-files environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: lint - commands: - make BUILD_IN_CONTAINER=false check-mod @@ -178,7 +178,7 @@ steps: - test - lint environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: check-mod - commands: - apk add make bash && make lint-scripts @@ -189,28 +189,28 @@ steps: depends_on: - check-generated-files environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: loki - commands: - make BUILD_IN_CONTAINER=false check-doc depends_on: - loki environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: check-doc - commands: - make BUILD_IN_CONTAINER=false validate-example-configs depends_on: - loki environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: validate-example-configs - commands: - make BUILD_IN_CONTAINER=false check-example-config-doc depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: check-example-config-doc - commands: - mkdir -p /hugo/content/docs/loki/latest @@ -243,7 +243,7 @@ steps: depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: loki-mixin-check when: event: @@ -268,7 +268,7 @@ steps: depends_on: - clone environment: {} - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: documentation-helm-reference-check trigger: ref: @@ -1254,7 +1254,7 @@ steps: from_secret: updater_config_template - depends_on: - prepare-updater-config - image: us.gcr.io/kubernetes-dev/drone/plugins/updater + image: us.gcr.io/kubernetes-dev/drone/plugins/updater@sha256:cbcb09c74f96a34c528f52bf9b4815a036b11fed65f685be216e0c8b8e84285b name: trigger settings: config_file: updater-config.json @@ -1270,6 +1270,52 @@ trigger: - refs/tags/v* - refs/pull/*/head --- +depends_on: +- manifest +image_pull_secrets: +- dockerconfigjson +kind: pipeline +name: update-loki-helm-chart-on-loki-release +steps: +- commands: + - apk add --no-cache bash git + - git fetch --tags + - latest_version=$(git tag -l 'v[0-9]*.[0-9]*.[0-9]*' | sort -V | tail -n 1 | sed + 's/v//g') + - RELEASE_TAG=$(./tools/image-tag) + - if [ "$RELEASE_TAG" != "$latest_version" ]; then echo "Current version $RELEASE_TAG + is not the latest version of Loki. 
The latest version is $latest_version" && exit + 78; fi + image: alpine + name: check-version-is-latest + when: + event: + - tag +- commands: + - apk add --no-cache bash git + - git fetch origin --tags + - RELEASE_TAG=$(./tools/image-tag) + - echo $PLUGIN_CONFIG_TEMPLATE > updater-config.json + - sed -i -E "s/\{\{release\}\}/$RELEASE_TAG/g" updater-config.json + depends_on: + - check-version-is-latest + image: alpine + name: prepare-helm-chart-update-config + settings: + config_template: + from_secret: helm-chart-update-config-template +- depends_on: + - prepare-helm-chart-update-config + image: us.gcr.io/kubernetes-dev/drone/plugins/updater@sha256:cbcb09c74f96a34c528f52bf9b4815a036b11fed65f685be216e0c8b8e84285b + name: trigger-helm-chart-update + settings: + config_file: updater-config.json + github_token: + from_secret: github_token +trigger: + ref: + - refs/tags/v* +--- kind: pipeline name: promtail-windows platform: @@ -1370,7 +1416,7 @@ steps: NFPM_SIGNING_KEY: from_secret: gpg_private_key NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: write-key - commands: - make BUILD_IN_CONTAINER=false packages @@ -1378,7 +1424,7 @@ steps: NFPM_PASSPHRASE: from_secret: gpg_passphrase NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: test packaging - commands: - ./tools/packaging/verify-deb-install.sh @@ -1404,7 +1450,7 @@ steps: NFPM_PASSPHRASE: from_secret: gpg_passphrase NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: publish when: event: @@ -1438,7 +1484,7 @@ steps: from_secret: docker_password DOCKER_USERNAME: from_secret: docker_username - image: grafana/loki-build-image:0.27.1 + image: grafana/loki-build-image:0.28.1 name: build and push privileged: true volumes: @@ -1662,6 +1708,12 @@ get: kind: secret name: updater_config_template --- +get: + name: on-loki-release-config.json + path: secret/data/common/loki-helm-chart-auto-update +kind: secret +name: helm-chart-update-config-template +--- get: name: passphrase path: infra/data/ci/packages-publish/gpg @@ -1675,6 +1727,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: c1930caa8e7ffedf82b641cc1204d87319cddf576efa787d2ef1a86d16c2b9cf +hmac: ae19447e0f2e91746c5b4e6a3dc469a08a63c44045f4b995a6980d0e401e071f ... diff --git a/.github/workflows/publish-technical-documentation-next.yml b/.github/workflows/publish-technical-documentation-next.yml index f5b0d007b8f3..d1105729420e 100644 --- a/.github/workflows/publish-technical-documentation-next.yml +++ b/.github/workflows/publish-technical-documentation-next.yml @@ -28,9 +28,11 @@ jobs: uses: "actions/checkout@v3.3.0" - name: "Clone website-sync Action" - # WEBSITE_SYNC_LOKI is a fine-grained GitHub Personal Access Token that expires. - # It must be updated in the grafanabot GitHub account. - run: "git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.WEBSITE_SYNC_LOKI }}@github.com/grafana/website-sync ./.github/actions/website-sync" + # WEBSITE_SYNC_TOKEN is a fine-grained GitHub Personal Access Token that expires. + # It must be regenerated in the grafanabot GitHub account and requires a Grafana organization + # GitHub administrator to update the organization secret. + # The IT helpdesk can update the organization secret. 
+ run: "git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.WEBSITE_SYNC_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync" - name: "Publish to website repository (next)" uses: "./.github/actions/website-sync" @@ -39,8 +41,10 @@ jobs: repository: "grafana/website" branch: "master" host: "github.com" - # PUBLISH_TO_WEBSITE_LOKI is a fine-grained GitHub Personal Access Token that expires. - # It must be updated in the grafanabot GitHub account. - github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_LOKI }}" + # PUBLISH_TO_WEBSITE_TOKEN is a fine-grained GitHub Personal Access Token that expires. + # It must be regenerated in the grafanabot GitHub account and requires a Grafana organization + # GitHub administrator to update the organization secret. + # The IT helpdesk can update the organization secret. + github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_TOKEN }}" source_folder: "docs/sources" target_folder: "content/docs/loki/next" diff --git a/.github/workflows/publish-technical-documentation-release.yml b/.github/workflows/publish-technical-documentation-release.yml index e9f5ffaecc07..77ebf01498ce 100644 --- a/.github/workflows/publish-technical-documentation-release.yml +++ b/.github/workflows/publish-technical-documentation-release.yml @@ -58,9 +58,11 @@ jobs: - name: "Clone website-sync Action" if: "steps.has-matching-release-tag.outputs.bool == 'true'" - # WEBSITE_SYNC_LOKI is a fine-grained GitHub Personal Access Token that expires. - # It must be updated in the grafanabot GitHub account. - run: "git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.WEBSITE_SYNC_LOKI }}@github.com/grafana/website-sync ./.github/actions/website-sync" + # WEBSITE_SYNC_TOKEN is a fine-grained GitHub Personal Access Token that expires. + # It must be regenerated in the grafanabot GitHub account and requires a Grafana organization + # GitHub administrator to update the organization secret. + # The IT helpdesk can update the organization secret. + run: "git clone --single-branch --no-tags --depth 1 -b master https://grafanabot:${{ secrets.WEBSITE_SYNC_TOKEN }}@github.com/grafana/website-sync ./.github/actions/website-sync" - name: "Publish to website repository (release)" if: "steps.has-matching-release-tag.outputs.bool == 'true'" @@ -70,8 +72,10 @@ jobs: repository: "grafana/website" branch: "master" host: "github.com" - # PUBLISH_TO_WEBSITE_LOKI is a fine-grained GitHub Personal Access Token that expires. - # It must be updated in the grafanabot GitHub account. - github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_LOKI }}" + # PUBLISH_TO_WEBSITE_TOKEN is a fine-grained GitHub Personal Access Token that expires. + # It must be regenerated in the grafanabot GitHub account and requires a Grafana organization + # GitHub administrator to update the organization secret. + # The IT helpdesk can update the organization secret. 
+ github_pat: "grafanabot:${{ secrets.PUBLISH_TO_WEBSITE_TOKEN }}" source_folder: "docs/sources" target_folder: "content/docs/loki/${{ steps.target.outputs.target }}.x" diff --git a/.golangci.yml b/.golangci.yml index c91154200a9a..08340a16095e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,7 @@ # options for analysis running run: # define go version - go: "1.19" + go: "1.20" # default concurrency is a available CPU number concurrency: 16 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1877d5bcc45c..fa381fe8fe0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,12 +6,12 @@ ##### Enhancements +* [8532](https://github.com/grafana/loki/pull/8532) **justcompile**: Adds Storage Class option to S3 objects * [7951](https://github.com/grafana/loki/pull/7951) **MichelHollands**: Add a count template function to line_format and label_format. * [7380](https://github.com/grafana/loki/pull/7380) **liguozhong**: metrics query: range vector support streaming agg when no overlap. * [7731](https://github.com/grafana/loki/pull/7731) **bitkill**: Add healthchecks to the docker-compose example. * [7759](https://github.com/grafana/loki/pull/7759) **kavirajk**: Improve error message for loading config with ENV variables. * [7785](https://github.com/grafana/loki/pull/7785) **dannykopping**: Add query blocker for queries and rules. -* [7804](https://github.com/grafana/loki/pull/7804) **sandeepsukhani**: Use grpc for communicating with compactor for query time filtering of data requested for deletion. * [7817](https://github.com/grafana/loki/pull/7817) **kavirajk**: fix(memcached): panic on send on closed channel. * [7916](https://github.com/grafana/loki/pull/7916) **ssncferreira**: Add `doc-generator` tool to generate configuration flags documentation. * [7964](https://github.com/grafana/loki/pull/7964) **slim-bean**: Add a `since` query parameter to allow querying based on relative time. @@ -29,6 +29,8 @@ * [8271](https://github.com/grafana/loki/pull/8271) **kavirajk**: logql: Support urlencode and urldecode template functions * [8259](https://github.com/grafana/loki/pull/8259) **mar4uk**: Extract push.proto from the logproto package to the separate module. * [7906](https://github.com/grafana/loki/pull/7906) **kavirajk**: Add API endpoint that formats LogQL expressions and support new `fmt` subcommand in `logcli` to format LogQL query. +* [6675](https://github.com/grafana/loki/pull/6675) **btaani**: Add logfmt expression parser for selective extraction of labels from logfmt formatted logs +* [8474](https://github.com/grafana/loki/pull/8474) **farodin91**: Add support for short-lived S3 session tokens ##### Fixes @@ -36,20 +38,21 @@ * [7880](https://github.com/grafana/loki/pull/7880) **sandeepsukhani**: consider range and offset in queries while looking for schema config for query sharding. * [7937](https://github.com/grafana/loki/pull/7937) **ssncferreira**: Deprecate CLI flag `-ruler.wal-cleaer.period` and replace it with `-ruler.wal-cleaner.period`. * [7966](https://github.com/grafana/loki/pull/7966) **sandeepsukhani**: Fix query-frontend request load balancing when using k8s service. -* [7988](https://github.com/grafana/loki/pull/7988) **ashwanthgoli** store: write overlapping chunks to multiple stores. -* [7925](https://github.com/grafana/loki/pull/7925) **sandeepsukhani**: Fix bugs in logs results caching causing query-frontend to return logs outside of query window. -* [8120](https://github.com/grafana/loki/pull/8120) **ashwanthgoli** fix panic on hitting /scheduler/ring when ring is disabled. 
* [8251](https://github.com/grafana/loki/pull/8251) **sandeepsukhani** index-store: fix indexing of chunks overlapping multiple schemas. * [8151](https://github.com/grafana/loki/pull/8151) **sandeepsukhani** fix log deletion with line filters. +* [8448](https://github.com/grafana/loki/pull/8448) **chaudum**: Fix bug in LogQL parser that caused certain queries that contain a vector expression to fail. ##### Changes * [8315](https://github.com/grafana/loki/pull/8315) **thepalbi** Relicense and export `pkg/ingester` WAL code to be used in Promtail's WAL. +##### Build + #### Promtail ##### Enhancements +* [8231](https://github.com/grafana/loki/pull/8231) **CCOLLOT**: Lambda-promtail: add support for AWS SQS message ingestion. * [7619](https://github.com/grafana/loki/pull/7619) **cadrake**: Add ability to pass query params to heroku drain targets for relabelling. * [7973](https://github.com/grafana/loki/pull/7973) **chodges15**: Add configuration to drop rate limited batches in Loki client and new metric label for drop reason. * [8153](https://github.com/grafana/loki/pull/8153) **kavirajk**: promtail: Add `max-line-size` limit to drop on client side @@ -57,11 +60,12 @@ * [8233](https://github.com/grafana/loki/pull/8233) **nicoche**: promtail: Add `max-line-size-truncate` limit to truncate too long lines on client side * [7462](https://github.com/grafana/loki/pull/7462) **MarNicGit**: Allow excluding event message from Windows Event Log entries. * [7597](https://github.com/grafana/loki/pull/7597) **redbaron**: allow ratelimiting by label +* [3493](https://github.com/grafana/loki/pull/3493) **adityacs** Support geoip stage. +* [8382](https://github.com/grafana/loki/pull/8382) **kelnage**: Promtail: Add event log message stage ##### Fixes -* [7771](https://github.com/grafana/loki/pull/7771) **GeorgeTsilias**: Handle nil error on target Details() call. -* [7461](https://github.com/grafana/loki/pull/7461) **MarNicGit**: Promtail: Fix collecting userdata field from Windows Event Log +* [8231](https://github.com/grafana/loki/pull/8231) **CCOLLOT**: Lambda-promtail: fix flushing behavior of batches, leading to a significant increase in performance. ##### Changes @@ -70,6 +74,7 @@ ##### Enhancement * [8413](https://github.com/grafana/loki/pull/8413) **chaudum**: Try to load tenant-specific `schemaconfig-{orgID}.yaml` when using `--remote-schema` argument and fallback to global `schemaconfig.yaml`. +* [8537](https://github.com/grafana/loki/pull/8537) **jeschkies**: Allow fetching all entries with `--limit=0`. #### Fluent Bit @@ -80,6 +85,7 @@ * [8024](https://github.com/grafana/loki/pull/8024) **jijotj**: Support passing loki address as environment variable #### Jsonnet + * [7923](https://github.com/grafana/loki/pull/7923) **manohar-koukuntla**: Add zone aware ingesters in jsonnet deployment ##### Fixes @@ -94,6 +100,32 @@ ### Dependencies +## 2.7.4 (2023-02-24) + +#### Loki + +##### Fixes + +* [8531](https://github.com/grafana/loki/pull/8531) **garrettlish**: logql: fix panics when cloning a special query +* [8120](https://github.com/grafana/loki/pull/8120) **ashwanthgoli**: fix panic on hitting /scheduler/ring when ring is disabled. +* [7988](https://github.com/grafana/loki/pull/7988) **ashwanthgoli**: store: write overlapping chunks to multiple stores. +* [7925](https://github.com/grafana/loki/pull/7925) **sandeepsukhani**: Fix bugs in logs results caching causing query-frontend to return logs outside of query window. 
+ +##### Build + +* [8575](https://github.com/grafana/loki/pull/8575) **MichelHollands**: Update build image to go 1.20.1 and alpine 3.16.4. +* [8583](https://github.com/grafana/loki/pull/8583) **MichelHollands**: Use 0.28.1 build image and update go and alpine versions. + +#### Promtail + +##### Enhancements + +##### Fixes + +* [8497](https://github.com/grafana/loki/pull/8497) **kavirajk**: Fix `cri` tags treating different streams as the same +* [7771](https://github.com/grafana/loki/pull/7771) **GeorgeTsilias**: Handle nil error on target Details() call. +* [7461](https://github.com/grafana/loki/pull/7461) **MarNicGit**: Promtail: Fix collecting userdata field from Windows Event Log + ## 2.7.3 (2023-02-01) #### Loki @@ -230,6 +262,7 @@ Check the history of the branch `release-2.7.x`. #### Logcli * [7325](https://github.com/grafana/loki/pull/7325) **dbirks**: Document setting up command completion +* [8518](https://github.com/grafana/loki/pull/8518) **SN9NV**: Add parallel flags #### Fluent Bit @@ -463,6 +496,7 @@ to include only the most relevant. * [5544](https://github.com/grafana/loki/pull/5544) **ssncferreira**: Update vectorAggEvaluator to fail for expressions without grouping * [5543](https://github.com/grafana/loki/pull/5543) **cyriltovena**: update loki go version to 1.17.8 * [5450](https://github.com/grafana/loki/pull/5450) **BenoitKnecht**: pkg/ruler/base: Add external_labels option +* [5522](https://github.com/grafana/loki/pull/5522) **liguozhong**: chunk backend: Integrate Alibaba Cloud oss * [5484](https://github.com/grafana/loki/pull/5484) **sandeepsukhani**: Add support for per user index query readiness with limits overrides * [5719](https://github.com/grafana/loki/pull/5719) **kovaxur**: Loki can use both basic-auth and tenant-id * [5358](https://github.com/grafana/loki/pull/5358) **DylanGuedes**: Add `RingMode` support to `IndexGateway` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 935ff16a05e0..f962ee676870 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -130,3 +130,23 @@ import ( "github.com/grafana/loki/pkg/logql" ) ``` + +## Contribute to documentation + +We're glad you're here to help make our technical documentation even better for Loki users. + +The Grafana docs team has created a [Writer's Toolkit](https://grafana.com/docs/writers-toolkit/writing-guide/contribute-documentation/) that includes information about how we write docs, a [Style Guide](https://grafana.com/docs/writers-toolkit/style-guide/), and templates to help you contribute to the Loki documentation. + +The Loki documentation is written using the CommonMark flavor of markdown, including some extended features. For more information about markdown, you can see the [CommonMark specification](https://spec.commonmark.org/), and a [quick reference guide](https://commonmark.org/help/) for CommonMark. + +Loki uses the static site generator [Hugo](https://gohugo.io/) to generate the documentation. Loki uses a continuous integration (CI) action to sync documentation to the [Grafana website](https://grafana.com/docs/loki/latest). The CI is triggered on every merge to main in the `docs` subfolder. + +You can preview the documentation locally after installing [Docker](https://www.docker.com/) or [Podman](https://podman.io/). + +To get a local preview of the documentation: +1. Run Docker (or Podman). +2. Navigate to the directory with the makefile, `/loki/docs`. +3. Run the command `make docs`. This uses the `grafana/docs` image which internally uses Hugo to generate the static site. +4. 
Open http://localhost:3002/docs/loki/latest/ to review your changes. + +> Note that `make docs` uses a lot of memory. If it crashes, increase the memory allocated to Docker and try again. diff --git a/Makefile b/Makefile index 9bab4f26383e..22783485eec5 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES)) BUILD_IN_CONTAINER ?= true # ensure you run `make drone` after changing this -BUILD_IMAGE_VERSION := 0.27.1 +BUILD_IMAGE_VERSION := 0.28.1 # Docker image info IMAGE_PREFIX ?= grafana @@ -760,11 +760,18 @@ validate-example-configs: loki # Dynamically generate ./docs/sources/configuration/examples.md using the example configs that we provide. # This target should be run if any of our example configs change. generate-example-config-doc: - echo "Removing existing doc at loki/docs/configuration/examples.md and re-generating. . ." + $(eval CONFIG_DOC_PATH=$(DOC_SOURCES_PATH)/configuration) + $(eval CONFIG_EXAMPLES_PATH=$(CONFIG_DOC_PATH)/examples) + echo "Removing existing doc at $(CONFIG_DOC_PATH)/examples.md and re-generating. . ." # Title and Heading - echo -e "---\ntitle: Examples\ndescription: Loki Configuration Examples\n---\n # Examples" > ./docs/sources/configuration/examples.md + echo -e "---\ntitle: Examples\ndescription: Loki Configuration Examples\n---\n # Examples" > $(CONFIG_DOC_PATH)/examples.md # Append each configuration and its file name to examples.md - for f in ./docs/sources/configuration/examples/*.yaml; do echo -e "\n## $$(basename $$f)\n\n\`\`\`yaml\n$$(cat $$f)\n\`\`\`\n" >> ./docs/sources/configuration/examples.md; done + for f in $$(find $(CONFIG_EXAMPLES_PATH)/*.yaml -printf "%f\n" | sort -k1n); do \ + echo -e "\n## $$f\n\n\`\`\`yaml\n" >> $(CONFIG_DOC_PATH)/examples.md; \ + cat $(CONFIG_EXAMPLES_PATH)/$$f >> $(CONFIG_DOC_PATH)/examples.md; \ + echo -e "\n\`\`\`\n" >> $(CONFIG_DOC_PATH)/examples.md; \ + done + # Fail our CI build if changes are made to example configurations but our doc is not updated check-example-config-doc: generate-example-config-doc diff --git a/clients/cmd/docker-driver/Dockerfile b/clients/cmd/docker-driver/Dockerfile index 7309a39a20bc..fd60286a6a57 100644 --- a/clients/cmd/docker-driver/Dockerfile +++ b/clients/cmd/docker-driver/Dockerfile @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . @@ -9,7 +9,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false clients/cmd/docker-driver/docker-driver -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/docker-driver/docker-driver /bin/docker-driver WORKDIR /bin/ diff --git a/clients/cmd/docker-driver/config.go b/clients/cmd/docker-driver/config.go index c38cdcc33ee2..95dd07a6d8e8 100644 --- a/clients/cmd/docker-driver/config.go +++ b/clients/cmd/docker-driver/config.go @@ -366,7 +366,7 @@ func relabelConfig(config string, lbs model.LabelSet) (model.LabelSet, error) { if err := yaml.UnmarshalStrict([]byte(config), &relabelConfig); err != nil { return nil, err } - relabed := relabel.Process(labels.FromMap(util.ModelLabelSetToMap(lbs)), relabelConfig...) + relabed, _ := relabel.Process(labels.FromMap(util.ModelLabelSetToMap(lbs)), relabelConfig...) 
return model.LabelSet(util.LabelsToMetric(relabed)), nil } diff --git a/clients/cmd/fluentd/Makefile b/clients/cmd/fluentd/Makefile index 63b9123bddcb..cc661e23f27b 100644 --- a/clients/cmd/fluentd/Makefile +++ b/clients/cmd/fluentd/Makefile @@ -4,7 +4,7 @@ SHELL = /usr/bin/env bash -o pipefail # needs to match the name in fluent-plugin-grafana-loki.gemspec NAME := fluent-plugin-grafana-loki # needs to match the version in fluent-plugin-grafana-loki.gemspec -VERSION := 1.2.19 +VERSION := 1.2.20 clean: rm -f $(NAME)-*.gem diff --git a/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec b/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec index 433a1a53a1bd..ed8df8b99420 100644 --- a/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec +++ b/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec @@ -4,7 +4,7 @@ $LOAD_PATH.push File.expand_path('lib', __dir__) Gem::Specification.new do |spec| spec.name = 'fluent-plugin-grafana-loki' - spec.version = '1.2.19' + spec.version = '1.2.20' spec.authors = %w[woodsaj briangann cyriltovena] spec.email = ['awoods@grafana.com', 'brian@grafana.com', 'cyril.tovena@grafana.com'] diff --git a/clients/cmd/promtail/Dockerfile b/clients/cmd/promtail/Dockerfile index a8520f89da05..fbdbb38b1116 100644 --- a/clients/cmd/promtail/Dockerfile +++ b/clients/cmd/promtail/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.5-bullseye as build +FROM golang:1.20.1-bullseye as build COPY . /src/loki WORKDIR /src/loki diff --git a/clients/cmd/promtail/Dockerfile.arm32 b/clients/cmd/promtail/Dockerfile.arm32 index e530815f782a..45f58c26726f 100644 --- a/clients/cmd/promtail/Dockerfile.arm32 +++ b/clients/cmd/promtail/Dockerfile.arm32 @@ -1,4 +1,4 @@ -FROM golang:1.19.5-bullseye as build +FROM golang:1.20.1-bullseye as build COPY . /src/loki WORKDIR /src/loki diff --git a/clients/cmd/promtail/Dockerfile.cross b/clients/cmd/promtail/Dockerfile.cross index 283604eb0669..773fb235eeb1 100644 --- a/clients/cmd/promtail/Dockerfile.cross +++ b/clients/cmd/promtail/Dockerfile.cross @@ -1,8 +1,8 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile . -FROM golang:1.19.5-alpine as goenv +FROM golang:1.20.1-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm diff --git a/clients/cmd/promtail/Dockerfile.debug b/clients/cmd/promtail/Dockerfile.debug index c49444093104..3bd361a4dd24 100644 --- a/clients/cmd/promtail/Dockerfile.debug +++ b/clients/cmd/promtail/Dockerfile.debug @@ -2,14 +2,14 @@ # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f clients/cmd/promtail/Dockerfile.debug . -FROM grafana/loki-build-image:0.27.1 as build +FROM grafana/loki-build-image:0.28.1 as build ARG GOARCH="amd64" COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false PROMTAIL_JOURNAL_ENABLED=true promtail-debug -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates tzdata COPY --from=build /src/loki/clients/cmd/promtail/promtail-debug /usr/bin/promtail-debug COPY --from=build /usr/bin/dlv /usr/bin/dlv diff --git a/clients/pkg/logentry/stages/eventlogmessage.go b/clients/pkg/logentry/stages/eventlogmessage.go new file mode 100644 index 000000000000..e637c5c92098 --- /dev/null +++ b/clients/pkg/logentry/stages/eventlogmessage.go @@ -0,0 +1,160 @@ +package stages + +import ( + "fmt" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/prometheus/common/model" +) + +const ( + defaultSource = "message" + ErrEmptyEvtLogMsgStageConfig = "empty event log message stage configuration" +) + +type EventLogMessageConfig struct { + Source *string `mapstructure:"source"` + DropInvalidLabels bool `mapstructure:"drop_invalid_labels"` + OverwriteExisting bool `mapstructure:"overwrite_existing"` +} + +type eventLogMessageStage struct { + cfg *EventLogMessageConfig + logger log.Logger +} + +// Create a event log message stage, including validating any supplied configuration +func newEventLogMessageStage(logger log.Logger, config interface{}) (Stage, error) { + cfg, err := parseEventLogMessageConfig(config) + if err != nil { + return nil, err + } + // validate config (i.e., check that source is a valid log label) + err = validateEventLogMessageConfig(cfg) + if err != nil { + return nil, err + } + return &eventLogMessageStage{ + cfg: cfg, + logger: log.With(logger, "component", "stage", "type", "event_log_message"), + }, nil +} + +// Parse the event log message configuration, creating a default configuration struct otherwise +func parseEventLogMessageConfig(config interface{}) (*EventLogMessageConfig, error) { + cfg := &EventLogMessageConfig{} + err := mapstructure.Decode(config, cfg) + if err != nil { + return nil, err + } + return cfg, nil +} + +// Ensure a event log message configuration object is valid, checking that any specified source +// is a valid label name, and setting default values if a nil config object is provided +func validateEventLogMessageConfig(c *EventLogMessageConfig) error { + if c == nil { + return errors.New(ErrEmptyEvtLogMsgStageConfig) + } + if c.Source != nil && !model.LabelName(*c.Source).IsValid() { + return fmt.Errorf(ErrInvalidLabelName, *c.Source) + } + return nil +} + +func (m *eventLogMessageStage) Run(in chan Entry) chan Entry { + out := make(chan Entry) + key := defaultSource + if m.cfg.Source != nil { + key = *m.cfg.Source + } + go func() { + defer close(out) + for e := range in { + err := m.processEntry(e.Extracted, key) + if err != nil { + continue + } + out <- e + } + }() + return out +} + +// Process a event log message from extracted with the specified key, adding additional +// entries into the extracted map +func (m *eventLogMessageStage) processEntry(extracted map[string]interface{}, key string) error { + value, ok := extracted[key] + if !ok { + if Debug { + level.Debug(m.logger).Log("msg", "source not in the extracted values", "source", key) + } + return nil + } + s, err := getString(value) + if err != nil { + level.Warn(m.logger).Log("msg", "invalid label value parsed", "value", value) + return err + } + lines := strings.Split(s, "\r\n") + for _, line := range lines { + parts := strings.SplitN(line, ":", 2) + if len(parts) < 
2 { + level.Warn(m.logger).Log("msg", "invalid line parsed from message", "line", line) + continue + } + mkey := parts[0] + if !model.LabelName(mkey).IsValid() { + if m.cfg.DropInvalidLabels { + if Debug { + level.Debug(m.logger).Log("msg", "invalid label parsed from message", "key", mkey) + } + continue + } + mkey = SanitizeFullLabelName(mkey) + } + if _, ok := extracted[mkey]; ok && !m.cfg.OverwriteExisting { + level.Info(m.logger).Log("msg", "extracted key that already existed, appending _extracted to key", + "key", mkey) + mkey += "_extracted" + } + mval := strings.TrimSpace(parts[1]) + if !model.LabelValue(mval).IsValid() { + if Debug { + level.Debug(m.logger).Log("msg", "invalid value parsed from message", "value", mval) + } + continue + } + extracted[mkey] = mval + } + if Debug { + level.Debug(m.logger).Log("msg", "extracted data debug in event_log_message stage", + "extracted data", fmt.Sprintf("%v", extracted)) + } + return nil +} + +func (m *eventLogMessageStage) Name() string { + return StageTypeEventLogMessage +} + +// Sanitize a input string to convert it into a valid prometheus label +// TODO: switch to prometheus/prometheus/util/strutil/SanitizeFullLabelName +func SanitizeFullLabelName(input string) string { + if len(input) == 0 { + return "_" + } + var validSb strings.Builder + for i, b := range input { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + validSb.WriteRune('_') + } else { + validSb.WriteRune(b) + } + } + return validSb.String() +} diff --git a/clients/pkg/logentry/stages/eventlogmessage_test.go b/clients/pkg/logentry/stages/eventlogmessage_test.go new file mode 100644 index 000000000000..4729d5a08f0e --- /dev/null +++ b/clients/pkg/logentry/stages/eventlogmessage_test.go @@ -0,0 +1,395 @@ +package stages + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + + util_log "github.com/grafana/loki/pkg/util/log" +) + +var testEvtLogMsgYamlDefaults = ` +pipeline_stages: +- eventlogmessage: {} +` + +var testEvtLogMsgYamlCustomSource = ` +pipeline_stages: +- eventlogmessage: + source: Message +` + +var testEvtLogMsgYamlDropInvalidLabels = ` +pipeline_stages: +- eventlogmessage: + drop_invalid_labels: true +` + +var testEvtLogMsgYamlOverwriteExisting = ` +pipeline_stages: +- eventlogmessage: + overwrite_existing: true +` + +var ( + testEvtLogMsgSimple = "Key1: Value 1\r\nKey2: Value 2\r\nKey3: Value: 3" + testEvtLogMsgInvalidLabels = "Key 1: Value 1\r\n0Key2: Value 2\r\nKey@3: Value 3\r\n: Value 4" + testEvtLogMsgOverwriteTest = "test: new value" +) + +func TestEventLogMessage_simple(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with sample event log message stage using default source": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgSimple, + map[string]interface{}{ + "Key1": "Value 1", + "Key2": "Value 2", + "Key3": "Value: 3", + "test": "existing value", + }, + }, + "successfully ran a pipeline with sample event log message stage using custom source": { + testEvtLogMsgYamlCustomSource, + "Message", + testEvtLogMsgSimple, + map[string]interface{}{ + "Key1": "Value 1", + "Key2": "Value 2", + "Key3": "Value: 3", + "test": "existing value", + }, + }, + "successfully ran a pipeline with sample event log message stage containing 
invalid labels": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidLabels, + map[string]interface{}{ + "Key_1": "Value 1", + "_Key2": "Value 2", + "Key_3": "Value 3", + "_": "Value 4", + "test": "existing value", + }, + }, + "successfully ran a pipeline with sample event log message stage without overwriting existing labels": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgOverwriteTest, + map[string]interface{}{ + "test": "existing value", + "test_extracted": "new value", + }, + }, + "successfully ran a pipeline with sample event log message stage overwriting existing labels": { + testEvtLogMsgYamlOverwriteExisting, + "message", + testEvtLogMsgOverwriteTest, + map[string]interface{}{ + "test": "new value", + }, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{ + testData.sourcekey: testData.msgdata, + "test": "existing value", + }, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +func TestEventLogMessageConfig_validate(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config interface{} + err error + }{ + "invalid config": { + map[string]interface{}{ + "source": 1, + }, + errors.New("1 error(s) decoding:\n\n* 'source' expected type 'string', got unconvertible type 'int', value: '1'"), + }, + "invalid source": { + map[string]interface{}{ + "source": "the message", + }, + fmt.Errorf(ErrInvalidLabelName, "the message"), + }, + "empty source": { + map[string]interface{}{ + "source": "", + }, + fmt.Errorf(ErrInvalidLabelName, ""), + }, + } + for tName, tt := range tests { + tt := tt + t.Run(tName, func(t *testing.T) { + _, err := newEventLogMessageStage(util_log.Logger, tt.config) + if tt.err != nil { + assert.NotNil(t, err, "EventLogMessage.validate() expected error = %v, but got nil", tt.err) + } + if err != nil { + assert.Equal(t, tt.err.Error(), err.Error(), "EventLogMessage.validate() expected error = %v, actual error = %v", tt.err, err) + } + }) + } +} + +var testEvtLogMsgNetworkConn = "Network connection detected:\r\nRuleName: Usermode\r\n" + + "UtcTime: 2023-01-31 08:07:23.782\r\nProcessGuid: {44ffd2c7-cc3a-63d8-2002-000000000d00}\r\n" + + "ProcessId: 7344\r\nImage: C:\\Users\\User\\promtail\\promtail-windows-amd64.exe\r\n" + + "User: WINTEST2211\\User\r\nProtocol: tcp\r\nInitiated: true\r\nSourceIsIpv6: false\r\n" + + "SourceIp: 10.0.2.15\r\nSourceHostname: WinTest2211..\r\nSourcePort: 49992\r\n" + + "SourcePortName: -\r\nDestinationIsIpv6: false\r\nDestinationIp: 34.117.8.58\r\n" + + "DestinationHostname: 58.8.117.34.bc.googleusercontent.com\r\nDestinationPort: 443\r\n" + + "DestinationPortName: https" + +func TestEventLogMessage_Real(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with network event log message stage using default source": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "Network_connection_detected": "", + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + 
"ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + "SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + "DestinationPortName": "https", + }, + }, + "successfully ran a pipeline with network event log message stage using custom source": { + testEvtLogMsgYamlCustomSource, + "Message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "Network_connection_detected": "", + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + "ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + "SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + "DestinationPortName": "https", + }, + }, + "successfully ran a pipeline with network event log message stage dropping invalid labels": { + testEvtLogMsgYamlDropInvalidLabels, + "message", + testEvtLogMsgNetworkConn, + map[string]interface{}{ + "RuleName": "Usermode", + "UtcTime": "2023-01-31 08:07:23.782", + "ProcessGuid": "{44ffd2c7-cc3a-63d8-2002-000000000d00}", + "ProcessId": "7344", + "Image": "C:\\Users\\User\\promtail\\promtail-windows-amd64.exe", + "User": "WINTEST2211\\User", + "Protocol": "tcp", + "Initiated": "true", + "SourceIsIpv6": "false", + "SourceIp": "10.0.2.15", + "SourceHostname": "WinTest2211..", + "SourcePort": "49992", + "SourcePortName": "-", + "DestinationIsIpv6": "false", + "DestinationIp": "34.117.8.58", + "DestinationHostname": "58.8.117.34.bc.googleusercontent.com", + "DestinationPort": "443", + "DestinationPortName": "https", + }, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{testData.sourcekey: testData.msgdata}, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +var ( + testEvtLogMsgInvalidStructure = "\n\rwhat; is this?\n\r" + testEvtLogMsgInvalidValue = "Key1: " + string([]byte{0xff, 0xfe, 0xfd}) +) + +func TestEventLogMessage_invalid(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config string + sourcekey string + msgdata string + extractedValues map[string]interface{} + }{ + "successfully ran a pipeline with an invalid event log message": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidStructure, + map[string]interface{}{}, + }, + "successfully ran a pipeline with sample event log message stage on the wrong default source": { + testEvtLogMsgYamlDefaults, + "notmessage", + testEvtLogMsgSimple, + 
map[string]interface{}{}, + }, + "successfully ran a pipeline with sample event log message stage dropping invalid labels": { + testEvtLogMsgYamlDropInvalidLabels, + "message", + testEvtLogMsgInvalidLabels, + map[string]interface{}{}, + }, + "successfully ran a pipeline with an invalid event log message value (not UTF-8)": { + testEvtLogMsgYamlDefaults, + "message", + testEvtLogMsgInvalidValue, + map[string]interface{}{}, + }, + } + + for testName, testData := range tests { + testData := testData + testData.extractedValues[testData.sourcekey] = testData.msgdata + + t.Run(testName, func(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{testData.sourcekey: testData.msgdata}, nil, testData.msgdata, time.Now()))[0] + assert.Equal(t, testData.extractedValues, out.Extracted) + }) + } +} + +func TestEventLogMessage_invalidString(t *testing.T) { + t.Parallel() + + pl, err := NewPipeline(util_log.Logger, loadConfig(testEvtLogMsgYamlDefaults), nil, prometheus.DefaultRegisterer) + assert.NoError(t, err, "Expected pipeline creation to not result in error") + out := processEntries(pl, + newEntry(map[string]interface{}{"message": nil}, nil, "", time.Now())) + assert.Len(t, out, 0, "No output should be produced with a nil input") +} + +var ( + inputJustKey = "Key 1:" + inputBoth = "Key 1: Value 1" + RegexSplitKeyValue = regexp.MustCompile(": ?") +) + +func BenchmarkSplittingKeyValuesRegex(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := RegexSplitKeyValue.Split(inputJustKey, 2) + if len(resultKey) > 1 { + val = resultKey[1] + } + resultKeyValue := RegexSplitKeyValue.Split(inputBoth, 2) + if len(resultKeyValue) > 1 { + val = resultKeyValue[1] + } + _ = val + } +} + +func BenchmarkSplittingKeyValuesSplitTrim(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := strings.SplitN(inputJustKey, ":", 2) + if len(resultKey) > 1 { + val = strings.TrimSpace(resultKey[1]) + } + resultKeyValue := strings.SplitN(inputBoth, ":", 2) + if len(resultKey) > 1 { + val = strings.TrimSpace(resultKeyValue[1]) + } + _ = val + } +} + +func BenchmarkSplittingKeyValuesSplitSubstr(b *testing.B) { + for i := 0; i < b.N; i++ { + var val string + resultKey := strings.SplitN(inputJustKey, ":", 2) + if len(resultKey) > 1 && len(resultKey[1]) > 0 { + val = resultKey[1][1:] + } + resultKeyValue := strings.SplitN(inputBoth, ":", 2) + if len(resultKey) > 1 && len(resultKey[1]) > 0 { + val = resultKeyValue[1][1:] + } + _ = val + } +} diff --git a/clients/pkg/logentry/stages/extensions.go b/clients/pkg/logentry/stages/extensions.go index 08abf20cc236..a89dde4fbab2 100644 --- a/clients/pkg/logentry/stages/extensions.go +++ b/clients/pkg/logentry/stages/extensions.go @@ -6,6 +6,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" ) const ( @@ -43,7 +44,7 @@ func NewDocker(logger log.Logger, registerer prometheus.Registerer) (Stage, erro type cri struct { // bounded buffer for CRI-O Partial logs lines (identified with tag `P` till we reach first `F`) - partialLines []string + partialLines map[model.Fingerprint]Entry maxPartialLines int base *Pipeline } @@ -57,26 +58,44 @@ func (c *cri) Name() string { func (c *cri) Run(entry chan Entry) chan Entry { entry = 
c.base.Run(entry) - in := RunWithSkip(entry, func(e Entry) (Entry, bool) { + in := RunWithSkipOrSendMany(entry, func(e Entry) ([]Entry, bool) { + fingerprint := e.Labels.Fingerprint() + + // We received partial-line (tag: "P") if e.Extracted["flags"] == "P" { - if len(c.partialLines) >= c.maxPartialLines { + if len(c.partialLines) > c.maxPartialLines { // Merge existing partialLines - newPartialLine := e.Line - e.Line = strings.Join(c.partialLines, "") + entries := make([]Entry, 0, len(c.partialLines)) + for _, v := range c.partialLines { + entries = append(entries, v) + } + level.Warn(c.base.logger).Log("msg", "cri stage: partial lines upperbound exceeded. merging it to single line", "threshold", MaxPartialLinesSize) - c.partialLines = c.partialLines[:0] - c.partialLines = append(c.partialLines, newPartialLine) - return e, false + + c.partialLines = make(map[model.Fingerprint]Entry) + c.partialLines[fingerprint] = e + + return entries, false } - c.partialLines = append(c.partialLines, e.Line) - return e, true + + prev, ok := c.partialLines[fingerprint] + if ok { + e.Line = strings.Join([]string{prev.Line, e.Line}, "") + } + c.partialLines[fingerprint] = e + + return []Entry{e}, true // it's a partial-line so skip it. } - if len(c.partialLines) > 0 { - c.partialLines = append(c.partialLines, e.Line) - e.Line = strings.Join(c.partialLines, "") - c.partialLines = c.partialLines[:0] + + // Now we got full-line (tag: "F"). + // 1. If any old partialLines matches with this full-line stream, merge it + // 2. Else just return the full line. + prev, ok := c.partialLines[fingerprint] + if ok { + e.Line = strings.Join([]string{prev.Line, e.Line}, "") + delete(c.partialLines, fingerprint) } - return e, false + return []Entry{e}, false }) return in @@ -122,6 +141,6 @@ func NewCRI(logger log.Logger, registerer prometheus.Registerer) (Stage, error) maxPartialLines: MaxPartialLinesSize, base: p, } - c.partialLines = make([]string, 0, c.maxPartialLines) + c.partialLines = make(map[model.Fingerprint]Entry) return &c, nil } diff --git a/clients/pkg/logentry/stages/extensions_test.go b/clients/pkg/logentry/stages/extensions_test.go index bb6bfe30de6a..3f908ca5749d 100644 --- a/clients/pkg/logentry/stages/extensions_test.go +++ b/clients/pkg/logentry/stages/extensions_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -88,49 +89,77 @@ var ( criTestTime2 = time.Now() ) +type testEntry struct { + labels model.LabelSet + line string +} + func TestCRI_tags(t *testing.T) { cases := []struct { name string lines []string expected []string maxPartialLines int + entries []testEntry err error }{ { name: "tag F", - lines: []string{ - "2019-05-07T18:57:50.904275087+00:00 stdout F some full line", - "2019-05-07T18:57:55.904275087+00:00 stdout F log", + entries: []testEntry{ + {line: "2019-05-07T18:57:50.904275087+00:00 stdout F some full line", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F log", labels: model.LabelSet{"foo": "bar"}}, }, expected: []string{"some full line", "log"}, }, { - name: "tag P", - lines: []string{ - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ", - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ", - "2019-05-07T18:57:55.904275087+00:00 stdout F log finished", - "2019-05-07T18:57:55.904275087+00:00 stdout F another full log", + name: "tag P multi-stream", + entries: 
[]testEntry{ + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ", labels: model.LabelSet{"foo": "bar2"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F log finished", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F another full log", labels: model.LabelSet{"foo": "bar2"}}, + }, + expected: []string{ + "partial line 1 log finished", // belongs to stream `{foo="bar"}` + "partial line 2 another full log", // belongs to stream `{foo="bar2"} + }, + }, + { + name: "tag P multi-stream with maxPartialLines exceeded", + entries: []testEntry{ + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ", labels: model.LabelSet{"label1": "val1", "label2": "val2"}}, + + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ", labels: model.LabelSet{"label1": "val1"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 3 ", labels: model.LabelSet{"label1": "val1", "label2": "val2"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 4 ", labels: model.LabelSet{"label1": "val3"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 5 ", labels: model.LabelSet{"label1": "val4"}}, // exceeded maxPartialLines as already 3 streams in flight. + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F log finished", labels: model.LabelSet{"label1": "val1", "label2": "val2"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F another full log", labels: model.LabelSet{"label1": "val3"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F yet an another full log", labels: model.LabelSet{"label1": "val4"}}, }, + maxPartialLines: 2, expected: []string{ - "partial line 1 partial line 2 log finished", + "partial line 1 partial line 3 ", + "partial line 2 ", + "partial line 4 ", + "log finished", "another full log", + "partial line 5 yet an another full log", }, }, { - name: "tag P exceeding MaxPartialLinesSize lines", - lines: []string{ - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ", - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ", - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 3", - "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 4 ", // this exceeds the `MaxPartialLinesSize` of 3 - "2019-05-07T18:57:55.904275087+00:00 stdout F log finished", - "2019-05-07T18:57:55.904275087+00:00 stdout F another full log", + name: "tag P single stream", + entries: []testEntry{ + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 1 ", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 2 ", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 3 ", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:50.904275087+00:00 stdout P partial line 4 ", labels: model.LabelSet{"foo": "bar"}}, // this exceeds the `MaxPartialLinesSize` of 3 + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F log finished", labels: model.LabelSet{"foo": "bar"}}, + {line: "2019-05-07T18:57:55.904275087+00:00 stdout F another full log", labels: model.LabelSet{"foo": "bar"}}, }, maxPartialLines: 3, expected: []string{ - "partial line 1 partial line 2 partial line 3", - "partial line 4 log finished", + "partial line 1 partial line 2 partial line 3 partial line 4 log finished", "another full log", }, }, @@ 
-148,16 +177,26 @@ func TestCRI_tags(t *testing.T) { p.(*cri).maxPartialLines = tt.maxPartialLines } - for _, line := range tt.lines { - out := processEntries(p, newEntry(nil, nil, line, time.Now())) + for _, entry := range tt.entries { + out := processEntries(p, newEntry(nil, entry.labels, entry.line, time.Now())) if len(out) > 0 { for _, en := range out { got = append(got, en.Line) - } } } - assert.Equal(t, tt.expected, got) + + expectedMap := make(map[string]bool) + for _, v := range tt.expected { + expectedMap[v] = true + } + + gotMap := make(map[string]bool) + for _, v := range got { + gotMap[v] = true + } + + assert.Equal(t, expectedMap, gotMap) }) } } diff --git a/clients/pkg/logentry/stages/geoip.go b/clients/pkg/logentry/stages/geoip.go new file mode 100644 index 000000000000..a45ef3223826 --- /dev/null +++ b/clients/pkg/logentry/stages/geoip.go @@ -0,0 +1,241 @@ +package stages + +import ( + "fmt" + "net" + "reflect" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/mitchellh/mapstructure" + "github.com/oschwald/geoip2-golang" + "github.com/pkg/errors" + "github.com/prometheus/common/model" +) + +const ( + ErrEmptyGeoIPStageConfig = "geoip stage config cannot be empty" + ErrEmptyDBPathGeoIPStageConfig = "db path cannot be empty" + ErrEmptySourceGeoIPStageConfig = "source cannot be empty" + ErrEmptyDBTypeGeoIPStageConfig = "db type should be either city or asn" +) + +type GeoIPFields int + +const ( + CITYNAME GeoIPFields = iota + COUNTRYNAME + CONTINENTNAME + CONTINENTCODE + LOCATION + POSTALCODE + TIMEZONE + SUBDIVISIONNAME + SUBDIVISIONCODE +) + +var fields = map[GeoIPFields]string{ + CITYNAME: "geoip_city_name", + COUNTRYNAME: "geoip_country_name", + CONTINENTNAME: "geoip_continet_name", + CONTINENTCODE: "geoip_continent_code", + LOCATION: "geoip_location", + POSTALCODE: "geoip_postal_code", + TIMEZONE: "geoip_timezone", + SUBDIVISIONNAME: "geoip_subdivision_name", + SUBDIVISIONCODE: "geoip_subdivision_code", +} + +// GeoIPConfig represents GeoIP stage config +type GeoIPConfig struct { + DB string `mapstructure:"db"` + Source *string `mapstructure:"source"` + DBType string `mapstructure:"db_type"` +} + +func validateGeoIPConfig(c *GeoIPConfig) error { + if c == nil { + return errors.New(ErrEmptyGeoIPStageConfig) + } + + if c.DB == "" { + return errors.New(ErrEmptyDBPathGeoIPStageConfig) + } + + if c.Source != nil && *c.Source == "" { + return errors.New(ErrEmptySourceGeoIPStageConfig) + } + + if c.DBType == "" { + return errors.New(ErrEmptyDBTypeGeoIPStageConfig) + } + + return nil +} + +func newGeoIPStage(logger log.Logger, configs interface{}) (Stage, error) { + cfgs := &GeoIPConfig{} + err := mapstructure.Decode(configs, cfgs) + if err != nil { + return nil, err + } + + err = validateGeoIPConfig(cfgs) + if err != nil { + return nil, err + } + + db, err := geoip2.Open(cfgs.DB) + if err != nil { + return nil, err + } + + return &geoIPStage{ + db: db, + logger: logger, + cfgs: cfgs, + }, nil +} + +type geoIPStage struct { + logger log.Logger + db *geoip2.Reader + cfgs *GeoIPConfig +} + +// Run implements Stage +func (g *geoIPStage) Run(in chan Entry) chan Entry { + out := make(chan Entry) + go func() { + defer close(out) + defer g.close() + for e := range in { + g.process(e.Labels, e.Extracted, &e.Timestamp, &e.Entry.Line) + out <- e + } + }() + return out +} + +// Name implements Stage +func (g *geoIPStage) Name() string { + return StageTypeGeoIP +} + +func (g *geoIPStage) process(labels model.LabelSet, extracted map[string]interface{}, t 
*time.Time, entry *string) { + var ip net.IP + if g.cfgs.Source != nil { + if _, ok := extracted[*g.cfgs.Source]; !ok { + if Debug { + level.Debug(g.logger).Log("msg", "source does not exist in the set of extracted values", "source", *g.cfgs.Source) + } + return + } + + value, err := getString(extracted[*g.cfgs.Source]) + if err != nil { + if Debug { + level.Debug(g.logger).Log("msg", "failed to convert source value to string", "source", *g.cfgs.Source, "err", err, "type", reflect.TypeOf(extracted[*g.cfgs.Source])) + } + return + } + ip = net.ParseIP(value) + } + switch g.cfgs.DBType { + case "city": + record, err := g.db.City(ip) + if err != nil { + level.Error(g.logger).Log("msg", "unable to get City record for the ip", "err", err, "ip", ip) + return + } + g.populateLabelsWithCityData(labels, record) + case "asn": + record, err := g.db.ASN(ip) + if err != nil { + level.Error(g.logger).Log("msg", "unable to get ASN record for the ip", "err", err, "ip", ip) + return + } + g.populateLabelsWithASNData(labels, record) + default: + level.Error(g.logger).Log("msg", "unknown database type") + } +} + +func (g *geoIPStage) close() { + if err := g.db.Close(); err != nil { + level.Error(g.logger).Log("msg", "error while closing geoip db", "err", err) + } +} + +func (g *geoIPStage) populateLabelsWithCityData(labels model.LabelSet, record *geoip2.City) { + for field, label := range fields { + switch field { + case CITYNAME: + cityName := record.City.Names["en"] + if cityName != "" { + labels[model.LabelName(label)] = model.LabelValue(cityName) + } + case COUNTRYNAME: + contryName := record.Country.Names["en"] + if contryName != "" { + labels[model.LabelName(label)] = model.LabelValue(contryName) + } + case CONTINENTNAME: + continentName := record.Continent.Names["en"] + if continentName != "" { + labels[model.LabelName(label)] = model.LabelValue(continentName) + } + case CONTINENTCODE: + continentCode := record.Continent.Code + if continentCode != "" { + labels[model.LabelName(label)] = model.LabelValue(continentCode) + } + case POSTALCODE: + postalCode := record.Postal.Code + if postalCode != "" { + labels[model.LabelName(label)] = model.LabelValue(postalCode) + } + case TIMEZONE: + timezone := record.Location.TimeZone + if timezone != "" { + labels[model.LabelName(label)] = model.LabelValue(timezone) + } + case LOCATION: + latitude := record.Location.Latitude + longitude := record.Location.Longitude + if latitude != 0 || longitude != 0 { + labels[model.LabelName(fmt.Sprintf("%s_latitude", label))] = model.LabelValue(fmt.Sprint(latitude)) + labels[model.LabelName(fmt.Sprintf("%s_longitude", label))] = model.LabelValue(fmt.Sprint(longitude)) + } + case SUBDIVISIONNAME: + if len(record.Subdivisions) > 0 { + // we get most specific subdivision https://dev.maxmind.com/release-note/most-specific-subdivision-attribute-added/ + subdivisionName := record.Subdivisions[len(record.Subdivisions)-1].Names["en"] + if subdivisionName != "" { + labels[model.LabelName(label)] = model.LabelValue(subdivisionName) + } + } + case SUBDIVISIONCODE: + if len(record.Subdivisions) > 0 { + subdivisionCode := record.Subdivisions[len(record.Subdivisions)-1].IsoCode + if subdivisionCode != "" { + labels[model.LabelName(label)] = model.LabelValue(subdivisionCode) + } + } + default: + level.Error(g.logger).Log("msg", "unknown geoip field") + } + } +} + +func (g *geoIPStage) populateLabelsWithASNData(labels model.LabelSet, record *geoip2.ASN) { + autonomousSystemNumber := record.AutonomousSystemNumber + autonomousSystemOrganization 
:= record.AutonomousSystemOrganization
+	if autonomousSystemNumber != 0 {
+		labels[model.LabelName("geoip_autonomous_system_number")] = model.LabelValue(fmt.Sprint(autonomousSystemNumber))
+	}
+	if autonomousSystemOrganization != "" {
+		labels[model.LabelName("geoip_autonomous_system_organization")] = model.LabelValue(autonomousSystemOrganization)
+	}
+}
diff --git a/clients/pkg/logentry/stages/geoip_test.go b/clients/pkg/logentry/stages/geoip_test.go
new file mode 100644
index 000000000000..fb0653e6fff8
--- /dev/null
+++ b/clients/pkg/logentry/stages/geoip_test.go
@@ -0,0 +1,55 @@
+package stages
+
+import (
+	"testing"
+
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_ValidateConfigs(t *testing.T) {
+	source := "ip"
+	tests := []struct {
+		config    GeoIPConfig
+		wantError error
+	}{
+		{
+			GeoIPConfig{
+				DB:     "test",
+				Source: &source,
+				DBType: "city",
+			},
+			nil,
+		},
+		{
+			GeoIPConfig{
+				Source: &source,
+				DBType: "city",
+			},
+			errors.New(ErrEmptyDBPathGeoIPStageConfig),
+		},
+		{
+			GeoIPConfig{
+				DB:     "test",
+				DBType: "city",
+			},
+			errors.New(ErrEmptySourceGeoIPStageConfig),
+		},
+		{
+			GeoIPConfig{
+				DB:     "test",
+				Source: &source,
+			},
+			errors.New(ErrEmptyDBTypeGeoIPStageConfig),
+		},
+	}
+	for _, tt := range tests {
+		err := validateGeoIPConfig(&tt.config)
+		if err != nil {
+			require.Equal(t, tt.wantError.Error(), err.Error())
+		}
+		if tt.wantError == nil {
+			require.Nil(t, err)
+		}
+	}
+}
diff --git a/clients/pkg/logentry/stages/match.go b/clients/pkg/logentry/stages/match.go
index e714fa91555a..3b4addbb0de1 100644
--- a/clients/pkg/logentry/stages/match.go
+++ b/clients/pkg/logentry/stages/match.go
@@ -1,13 +1,12 @@
 package stages
 
 import (
-	"github.com/prometheus/prometheus/model/labels"
-
 	"github.com/go-kit/log"
 	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
 
 	"github.com/grafana/loki/clients/pkg/logentry/logql"
 )
diff --git a/clients/pkg/logentry/stages/pipeline.go b/clients/pkg/logentry/stages/pipeline.go
index b0a897e3054b..c20a7784c511 100644
--- a/clients/pkg/logentry/stages/pipeline.go
+++ b/clients/pkg/logentry/stages/pipeline.go
@@ -91,6 +91,25 @@ func RunWithSkip(input chan Entry, process func(e Entry) (Entry, bool)) chan Ent
 	return out
 }
 
+// RunWithSkipOrSendMany is the same as RunWithSkip, except that the `process` function can either skip sending the entry to the output channel (by returning `skip` true), or return many entries to be sent.
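+// For example, a stage that expands one entry into several could be wired up as follows
+// (splitEntry is a placeholder for the stage's own logic, not a function defined in this package):
+//
+//	out := RunWithSkipOrSendMany(in, func(e Entry) ([]Entry, bool) {
+//		parts := splitEntry(e)
+//		if len(parts) == 0 {
+//			return nil, true // skip: nothing is sent downstream
+//		}
+//		return parts, false // every returned entry is sent downstream
+//	})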
+func RunWithSkipOrSendMany(input chan Entry, process func(e Entry) ([]Entry, bool)) chan Entry { + out := make(chan Entry) + go func() { + defer close(out) + for e := range input { + results, skip := process(e) + if skip { + continue + } + for _, result := range results { + out <- result + } + } + }() + + return out +} + // Run implements Stage func (p *Pipeline) Run(in chan Entry) chan Entry { in = RunWith(in, func(e Entry) Entry { diff --git a/clients/pkg/logentry/stages/stage.go b/clients/pkg/logentry/stages/stage.go index d119b6c82625..410016a79598 100644 --- a/clients/pkg/logentry/stages/stage.go +++ b/clients/pkg/logentry/stages/stage.go @@ -15,28 +15,30 @@ import ( ) const ( - StageTypeJSON = "json" - StageTypeLogfmt = "logfmt" - StageTypeRegex = "regex" - StageTypeReplace = "replace" - StageTypeMetric = "metrics" - StageTypeLabel = "labels" - StageTypeLabelDrop = "labeldrop" - StageTypeTimestamp = "timestamp" - StageTypeOutput = "output" - StageTypeDocker = "docker" - StageTypeCRI = "cri" - StageTypeMatch = "match" - StageTypeTemplate = "template" - StageTypePipeline = "pipeline" - StageTypeTenant = "tenant" - StageTypeDrop = "drop" - StageTypeLimit = "limit" - StageTypeMultiline = "multiline" - StageTypePack = "pack" - StageTypeLabelAllow = "labelallow" - StageTypeStaticLabels = "static_labels" - StageTypeDecolorize = "decolorize" + StageTypeJSON = "json" + StageTypeLogfmt = "logfmt" + StageTypeRegex = "regex" + StageTypeReplace = "replace" + StageTypeMetric = "metrics" + StageTypeLabel = "labels" + StageTypeLabelDrop = "labeldrop" + StageTypeTimestamp = "timestamp" + StageTypeOutput = "output" + StageTypeDocker = "docker" + StageTypeCRI = "cri" + StageTypeMatch = "match" + StageTypeTemplate = "template" + StageTypePipeline = "pipeline" + StageTypeTenant = "tenant" + StageTypeDrop = "drop" + StageTypeLimit = "limit" + StageTypeMultiline = "multiline" + StageTypePack = "pack" + StageTypeLabelAllow = "labelallow" + StageTypeStaticLabels = "static_labels" + StageTypeDecolorize = "decolorize" + StageTypeEventLogMessage = "eventlogmessage" + StageTypeGeoIP = "geoip" ) // Processor takes an existing set of labels, timestamp and log entry and returns either a possibly mutated @@ -215,6 +217,16 @@ func New(logger log.Logger, jobName *string, stageType string, if err != nil { return nil, err } + case StageTypeEventLogMessage: + s, err = newEventLogMessageStage(logger, cfg) + if err != nil { + return nil, err + } + case StageTypeGeoIP: + s, err = newGeoIPStage(logger, cfg) + if err != nil { + return nil, err + } default: return nil, errors.Errorf("Unknown stage type: %s", stageType) } diff --git a/clients/pkg/promtail/client/client_test.go b/clients/pkg/promtail/client/client_test.go index c6475040fccc..35f02c2cf57f 100644 --- a/clients/pkg/promtail/client/client_test.go +++ b/clients/pkg/promtail/client/client_test.go @@ -3,9 +3,7 @@ package client import ( "fmt" "io" - "math" "net/http" - "net/http/httptest" "net/url" "strings" "testing" @@ -24,7 +22,6 @@ import ( "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/util" lokiflag "github.com/grafana/loki/pkg/util/flagext" ) @@ -38,11 +35,6 @@ var logEntries = []api.Entry{ {Labels: model.LabelSet{}, Entry: logproto.Entry{Timestamp: time.Unix(6, 0).UTC(), Line: "line0123456789"}}, } -type receivedReq struct { - tenantID string - pushReq logproto.PushRequest -} - func TestClient_Handle(t *testing.T) { tests := map[string]struct { clientBatchSize int @@ -500,7 +492,7 @@ 
func TestClient_Handle(t *testing.T) { receivedReqsChan := make(chan receivedReq, 10) // Start a local HTTP server - server := httptest.NewServer(createServerHandler(receivedReqsChan, testData.serverResponseStatus)) + server := newTestRemoteWriteServer(receivedReqsChan, testData.serverResponseStatus) require.NotNil(t, server) defer server.Close() @@ -642,7 +634,7 @@ func TestClient_StopNow(t *testing.T) { receivedReqsChan := make(chan receivedReq, 10) // Start a local HTTP server - server := httptest.NewServer(createServerHandler(receivedReqsChan, c.serverResponseStatus)) + server := newTestRemoteWriteServer(receivedReqsChan, c.serverResponseStatus) require.NotNil(t, server) defer server.Close() @@ -710,24 +702,6 @@ func TestClient_StopNow(t *testing.T) { } } -func createServerHandler(receivedReqsChan chan receivedReq, status int) http.HandlerFunc { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - // Parse the request - var pushReq logproto.PushRequest - if err := util.ParseProtoReader(req.Context(), req.Body, int(req.ContentLength), math.MaxInt32, &pushReq, util.RawSnappy); err != nil { - rw.WriteHeader(500) - return - } - - receivedReqsChan <- receivedReq{ - tenantID: req.Header.Get("X-Scope-OrgID"), - pushReq: pushReq, - } - - rw.WriteHeader(status) - }) -} - type RoundTripperFunc func(*http.Request) (*http.Response, error) func (r RoundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { diff --git a/clients/pkg/promtail/client/manager.go b/clients/pkg/promtail/client/manager.go new file mode 100644 index 000000000000..c6df8391b636 --- /dev/null +++ b/clients/pkg/promtail/client/manager.go @@ -0,0 +1,108 @@ +package client + +import ( + "fmt" + "strings" + "sync" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/clients/pkg/promtail/wal" +) + +// Manager manages remote write client instantiation, and connects the related components to orchestrate the flow of api.Entry +// from the scrape targets, to the remote write clients themselves. +// +// Right now it just supports instantiating the WAL writer side of the future-to-be WAL enabled client. In follow-up +// work, tracked in https://github.com/grafana/loki/issues/8197, this Manager will be responsible for instantiating all client +// types: Logger, Multi and WAL. +type Manager struct { + clients []Client + + entries chan api.Entry + once sync.Once + + wg sync.WaitGroup +} + +// NewManager creates a new Manager +func NewManager(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int, maxLineSizeTruncate bool, reg prometheus.Registerer, walCfg wal.Config, clientCfgs ...Config) (*Manager, error) { + // TODO: refactor this to instantiate all clients types + var fake struct{} + + if len(clientCfgs) == 0 { + return nil, fmt.Errorf("at least one client config should be provided") + } + clientsCheck := make(map[string]struct{}) + clients := make([]Client, 0, len(clientCfgs)) + for _, cfg := range clientCfgs { + client, err := New(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger) + if err != nil { + return nil, err + } + + // Don't allow duplicate clients, we have client specific metrics that need at least one unique label value (name). 
+ if _, ok := clientsCheck[client.Name()]; ok { + return nil, fmt.Errorf("duplicate client configs are not allowed, found duplicate for name: %s", cfg.Name) + } + + clientsCheck[client.Name()] = fake + clients = append(clients, client) + } + + manager := &Manager{ + clients: clients, + entries: make(chan api.Entry), + } + manager.start() + return manager, nil +} + +func (m *Manager) start() { + m.wg.Add(1) + go func() { + defer m.wg.Done() + // keep reading received entries + for e := range m.entries { + // then fanout to every remote write client + for _, c := range m.clients { + c.Chan() <- e + } + } + }() +} + +func (m *Manager) StopNow() { + for _, c := range m.clients { + c.StopNow() + } +} + +func (m *Manager) Name() string { + var sb strings.Builder + // name contains wal since manager is used as client only when WAL enabled for now + sb.WriteString("wal:") + for i, c := range m.clients { + sb.WriteString(c.Name()) + if i != len(m.clients)-1 { + sb.WriteString(",") + } + } + return sb.String() +} + +func (m *Manager) Chan() chan<- api.Entry { + return m.entries +} + +func (m *Manager) Stop() { + // first stop the receiving channel + m.once.Do(func() { close(m.entries) }) + m.wg.Wait() + // close clients + for _, c := range m.clients { + c.Stop() + } +} diff --git a/clients/pkg/promtail/client/manager_test.go b/clients/pkg/promtail/client/manager_test.go new file mode 100644 index 000000000000..4c56f04b1c06 --- /dev/null +++ b/clients/pkg/promtail/client/manager_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "net/http" + "net/url" + "os" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/backoff" + "github.com/grafana/dskit/flagext" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/clients/pkg/promtail/wal" + "github.com/grafana/loki/pkg/logproto" +) + +func TestManager_ErrorCreatingWhenNoClientConfigsProvided(t *testing.T) { + walDir := t.TempDir() + _, err := NewManager(nil, log.NewLogfmtLogger(os.Stdout), 0, 0, false, prometheus.NewRegistry(), wal.Config{ + Dir: walDir, + Enabled: true, + }) + require.Error(t, err) +} + +type closer interface { + Close() +} + +type closerFunc func() + +func (c closerFunc) Close() { + c() +} + +func newServerAncClientConfig(t *testing.T) (Config, chan receivedReq, closer) { + receivedReqsChan := make(chan receivedReq, 10) + + // Start a local HTTP server + server := newTestRemoteWriteServer(receivedReqsChan, http.StatusOK) + require.NotNil(t, server) + + testClientURL, _ := url.Parse(server.URL) + testClientConfig := Config{ + Name: "test-client", + URL: flagext.URLValue{URL: testClientURL}, + Timeout: time.Second * 2, + BatchSize: 1, + BackoffConfig: backoff.Config{ + MaxRetries: 0, + }, + } + return testClientConfig, receivedReqsChan, closerFunc(func() { + server.Close() + close(receivedReqsChan) + }) +} + +func TestManager_EntriesAreWrittenToClients(t *testing.T) { + walDir := t.TempDir() + reg := prometheus.NewRegistry() + testClientConfig, rwReceivedReqs, closeServer := newServerAncClientConfig(t) + clientMetrics := NewMetrics(reg) + manager, err := NewManager(clientMetrics, log.NewLogfmtLogger(os.Stdout), 0, 0, false, reg, wal.Config{ + Dir: walDir, + Enabled: true, + }, testClientConfig) + require.NoError(t, err) + require.Equal(t, "wal:test-client", manager.Name()) + + var testLabels = model.LabelSet{ + "wal_enabled": "true", + } + var lines = 
[]string{
+		"Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
+		"In eu nisl ac massa ultricies rutrum.",
+		"Sed eget felis at ipsum auctor congue.",
+	}
+	for _, line := range lines {
+		manager.Chan() <- api.Entry{
+			Labels: testLabels,
+			Entry: logproto.Entry{
+				Timestamp: time.Now(),
+				Line:      line,
+			},
+		}
+	}
+
+	// stop client to flush WAL, stop rw server
+	manager.Stop()
+	closeServer.Close()
+
+	// assert over rw client received entries
+	rwSeenEntriesCount := 0
+	for req := range rwReceivedReqs {
+		rwSeenEntriesCount++
+		require.Len(t, req.pushReq.Streams, 1, "expected 1 stream requests to be received")
+		require.Len(t, req.pushReq.Streams[0].Entries, 1, "expected 1 entry in the only stream received per request")
+		require.Equal(t, `{wal_enabled="true"}`, req.pushReq.Streams[0].Labels)
+		require.Contains(t, lines, req.pushReq.Streams[0].Entries[0].Line)
+	}
+	require.Equal(t, 3, rwSeenEntriesCount)
+}
diff --git a/clients/pkg/promtail/client/test_server.go b/clients/pkg/promtail/client/test_server.go
new file mode 100644
index 000000000000..3883173f9c23
--- /dev/null
+++ b/clients/pkg/promtail/client/test_server.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"math"
+	"net/http"
+	"net/http/httptest"
+
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/util"
+)
+
+type receivedReq struct {
+	tenantID string
+	pushReq  logproto.PushRequest
+}
+
+// newTestRemoteWriteServer creates a new httptest.Server that can handle remote write requests. When a request is handled,
+// the received request is written to receivedChan, and the given status code is returned in the response.
+func newTestRemoteWriteServer(receivedChan chan receivedReq, status int) *httptest.Server {
+	server := httptest.NewServer(createServerHandler(receivedChan, status))
+	return server
+}
+
+func createServerHandler(receivedReqsChan chan receivedReq, receivedOKStatus int) http.HandlerFunc {
+	return func(rw http.ResponseWriter, req *http.Request) {
+		// Parse the request
+		var pushReq logproto.PushRequest
+		if err := util.ParseProtoReader(req.Context(), req.Body, int(req.ContentLength), math.MaxInt32, &pushReq, util.RawSnappy); err != nil {
+			rw.WriteHeader(500)
+			return
+		}
+
+		receivedReqsChan <- receivedReq{
+			tenantID: req.Header.Get("X-Scope-OrgID"),
+			pushReq:  pushReq,
+		}
+
+		rw.WriteHeader(receivedOKStatus)
+	}
+}
diff --git a/clients/pkg/promtail/config/config.go b/clients/pkg/promtail/config/config.go
index 51f4f811ee53..65437f58e5ad 100644
--- a/clients/pkg/promtail/config/config.go
+++ b/clients/pkg/promtail/config/config.go
@@ -7,9 +7,6 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	dskit_flagext "github.com/grafana/dskit/flagext"
-
-	"github.com/grafana/loki/pkg/tracing"
-
 	"gopkg.in/yaml.v2"
 
 	"github.com/grafana/loki/clients/pkg/promtail/client"
@@ -18,7 +15,8 @@ import (
 	"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
 	"github.com/grafana/loki/clients/pkg/promtail/server"
 	"github.com/grafana/loki/clients/pkg/promtail/targets/file"
-
+	"github.com/grafana/loki/clients/pkg/promtail/wal"
+	"github.com/grafana/loki/pkg/tracing"
 	"github.com/grafana/loki/pkg/util/flagext"
 )
 
@@ -39,6 +37,7 @@ type Config struct {
 	LimitsConfig   limit.Config   `yaml:"limits_config,omitempty"`
 	Options        Options        `yaml:"options,omitempty"`
 	Tracing        tracing.Config `yaml:"tracing"`
+	WAL            wal.Config     `yaml:"wal"`
 }
 
 // RegisterFlags with prefix registers flags where every name is prefixed by
diff --git a/clients/pkg/promtail/promtail.go b/clients/pkg/promtail/promtail.go
index d8e2deb47a0f..23a18d6b2327 100644
--- a/clients/pkg/promtail/promtail.go +++ b/clients/pkg/promtail/promtail.go @@ -8,21 +8,28 @@ import ( "os/signal" "sync" "syscall" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/grafana/loki/clients/pkg/logentry/stages" + "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/client" "github.com/grafana/loki/clients/pkg/promtail/config" "github.com/grafana/loki/clients/pkg/promtail/server" "github.com/grafana/loki/clients/pkg/promtail/targets" "github.com/grafana/loki/clients/pkg/promtail/targets/target" - + "github.com/grafana/loki/clients/pkg/promtail/utils" + "github.com/grafana/loki/clients/pkg/promtail/wal" util_log "github.com/grafana/loki/pkg/util/log" ) +const ( + timeoutUntilFanoutHardStop = time.Second * 30 +) + var reloadSuccessTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "promtail", Name: "config_reload_success_total", @@ -58,6 +65,8 @@ func WithRegisterer(reg prometheus.Registerer) Option { // Promtail is the root struct for Promtail. type Promtail struct { client client.Client + walWriter *wal.Writer + entriesFanout api.EntryHandler targetManagers *targets.TargetManagers server server.Server logger log.Logger @@ -133,12 +142,38 @@ func (p *Promtail) reloadConfig(cfg *config.Config) error { stages.SetReadLineRateLimiter(cfg.LimitsConfig.ReadlineRate, cfg.LimitsConfig.ReadlineBurst, cfg.LimitsConfig.ReadlineRateDrop) } var err error + // entryHandlers contains all sinks were scraped log entries should get to + var entryHandlers = []api.EntryHandler{} + + // TODO: Refactor all client instantiation inside client.Manager if p.dryRun { p.client, err = client.NewLogger(p.metrics, p.logger, cfg.ClientConfigs...) if err != nil { return err } cfg.PositionsConfig.ReadOnly = true + } else if cfg.WAL.Enabled { + p.client, err = client.NewManager( + p.metrics, + p.logger, + cfg.LimitsConfig.MaxStreams, + cfg.LimitsConfig.MaxLineSize.Val(), + cfg.LimitsConfig.MaxLineSizeTruncate, + p.reg, + cfg.WAL, + cfg.ClientConfigs..., + ) + if err != nil { + return err + } + + p.walWriter, err = wal.NewWriter(cfg.WAL, p.logger, p.reg) + if err != nil { + return fmt.Errorf("failed to create wal writer: %w", err) + } + // If wal is enabled, the walWriter should be a target for scraped entries as well as the remote-write client, + // at least until we implement the wal reader side (https://github.com/grafana/loki/pull/8302). + entryHandlers = append(entryHandlers, p.walWriter) } else { p.client, err = client.NewMulti(p.metrics, p.logger, cfg.LimitsConfig.MaxStreams, cfg.LimitsConfig.MaxLineSize.Val(), cfg.LimitsConfig.MaxLineSizeTruncate, cfg.ClientConfigs...) if err != nil { @@ -146,7 +181,10 @@ func (p *Promtail) reloadConfig(cfg *config.Config) error { } } - tms, err := targets.NewTargetManagers(p, p.reg, p.logger, cfg.PositionsConfig, p.client, cfg.ScrapeConfig, &cfg.TargetConfig) + entryHandlers = append(entryHandlers, p.client) + p.entriesFanout = utils.NewFanoutEntryHandler(timeoutUntilFanoutHardStop, entryHandlers...) + + tms, err := targets.NewTargetManagers(p, p.reg, p.logger, cfg.PositionsConfig, p.entriesFanout, cfg.ScrapeConfig, &cfg.TargetConfig) if err != nil { return err } @@ -196,6 +234,12 @@ func (p *Promtail) Shutdown() { if p.targetManagers != nil { p.targetManagers.Stop() } + if p.entriesFanout != nil { + p.entriesFanout.Stop() + } + if p.walWriter != nil { + p.walWriter.Stop() + } // todo work out the stop. 
p.client.Stop() } diff --git a/clients/pkg/promtail/targets/docker/target.go b/clients/pkg/promtail/targets/docker/target.go index 6acca9046bcf..8db16775f9f9 100644 --- a/clients/pkg/promtail/targets/docker/target.go +++ b/clients/pkg/promtail/targets/docker/target.go @@ -203,7 +203,7 @@ func (t *Target) process(r io.Reader, logStream string) { lb.Set(string(k), string(v)) } lb.Set(dockerLabelLogStream, logStream) - processed := relabel.Process(lb.Labels(nil), t.relabelConfig...) + processed, _ := relabel.Process(lb.Labels(nil), t.relabelConfig...) filtered := make(model.LabelSet) for _, lbl := range processed { diff --git a/clients/pkg/promtail/targets/docker/target_group.go b/clients/pkg/promtail/targets/docker/target_group.go index b3ce893ec70b..0b0ea9eef6f5 100644 --- a/clients/pkg/promtail/targets/docker/target_group.go +++ b/clients/pkg/promtail/targets/docker/target_group.go @@ -2,15 +2,21 @@ package docker import ( "fmt" + "net/http" + "net/url" "sync" + "time" "github.com/docker/docker/client" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/relabel" + "github.com/grafana/loki/pkg/util/build" + "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/positions" "github.com/grafana/loki/clients/pkg/promtail/targets/target" @@ -20,14 +26,16 @@ const DockerSource = "Docker" // targetGroup manages all container targets of one Docker daemon. type targetGroup struct { - metrics *Metrics - logger log.Logger - positions positions.Positions - entryHandler api.EntryHandler - defaultLabels model.LabelSet - relabelConfig []*relabel.Config - host string - client client.APIClient + metrics *Metrics + logger log.Logger + positions positions.Positions + entryHandler api.EntryHandler + defaultLabels model.LabelSet + relabelConfig []*relabel.Config + host string + httpClientConfig config.HTTPClientConfig + client client.APIClient + refreshInterval model.Duration mtx sync.Mutex targets map[string]*Target @@ -60,11 +68,36 @@ func (tg *targetGroup) sync(groups []*targetgroup.Group) { // addTarget checks whether the container with given id is already known. If not it's added to the this group func (tg *targetGroup) addTarget(id string, discoveredLabels model.LabelSet) error { if tg.client == nil { - var err error + hostURL, err := url.Parse(tg.host) + if err != nil { + return err + } + opts := []client.Opt{ client.WithHost(tg.host), client.WithAPIVersionNegotiation(), } + + // There are other protocols than HTTP supported by the Docker daemon, like + // unix, which are not supported by the HTTP client. Passing HTTP client + // options to the Docker client makes those non-HTTP requests fail. + if hostURL.Scheme == "http" || hostURL.Scheme == "https" { + rt, err := config.NewRoundTripperFromConfig(tg.httpClientConfig, "docker_sd") + if err != nil { + return err + } + opts = append(opts, + client.WithHTTPClient(&http.Client{ + Transport: rt, + Timeout: time.Duration(tg.refreshInterval), + }), + client.WithScheme(hostURL.Scheme), + client.WithHTTPHeaders(map[string]string{ + "User-Agent": fmt.Sprintf("loki-promtail/%s", build.Version), + }), + ) + } + tg.client, err = client.NewClientWithOpts(opts...) 
if err != nil { level.Error(tg.logger).Log("msg", "could not create new Docker client", "err", err) diff --git a/clients/pkg/promtail/targets/docker/targetmanager.go b/clients/pkg/promtail/targets/docker/targetmanager.go index 66b79f020629..a35a2aa79f19 100644 --- a/clients/pkg/promtail/targets/docker/targetmanager.go +++ b/clients/pkg/promtail/targets/docker/targetmanager.go @@ -73,14 +73,16 @@ func NewTargetManager( _, ok := tm.groups[syncerKey] if !ok { tm.groups[syncerKey] = &targetGroup{ - metrics: metrics, - logger: logger, - positions: positions, - targets: make(map[string]*Target), - entryHandler: pipeline.Wrap(pushClient), - defaultLabels: model.LabelSet{}, - relabelConfig: cfg.RelabelConfigs, - host: sdConfig.Host, + metrics: metrics, + logger: logger, + positions: positions, + targets: make(map[string]*Target), + entryHandler: pipeline.Wrap(pushClient), + defaultLabels: model.LabelSet{}, + relabelConfig: cfg.RelabelConfigs, + host: sdConfig.Host, + httpClientConfig: sdConfig.HTTPClientConfig, + refreshInterval: sdConfig.RefreshInterval, } } configs[syncerKey] = append(configs[syncerKey], sdConfig) diff --git a/clients/pkg/promtail/targets/file/filetargetmanager.go b/clients/pkg/promtail/targets/file/filetargetmanager.go index f72f316993b3..ba47f605c598 100644 --- a/clients/pkg/promtail/targets/file/filetargetmanager.go +++ b/clients/pkg/promtail/targets/file/filetargetmanager.go @@ -300,18 +300,18 @@ func (s *targetSyncer) sync(groups []*targetgroup.Group, targetEventHandler chan labelMap[string(k)] = string(v) } - processedLabels := relabel.Process(labels.FromMap(labelMap), s.relabelConfig...) + processedLabels, keep := relabel.Process(labels.FromMap(labelMap), s.relabelConfig...) var labels = make(model.LabelSet) for k, v := range processedLabels.Map() { labels[model.LabelName(k)] = model.LabelValue(v) } - // Drop empty targets (drop in relabeling). - if processedLabels == nil { - dropped = append(dropped, target.NewDroppedTarget("dropping target, no labels", discoveredLabels)) - level.Debug(s.log).Log("msg", "dropping target, no labels") - s.metrics.failedTargets.WithLabelValues("empty_labels").Inc() + // Drop targets if instructed to drop in relabeling. + if !keep { + dropped = append(dropped, target.NewDroppedTarget("dropped target", discoveredLabels)) + level.Debug(s.log).Log("msg", "dropped target") + s.metrics.failedTargets.WithLabelValues("dropped").Inc() continue } diff --git a/clients/pkg/promtail/targets/gcplog/formatter.go b/clients/pkg/promtail/targets/gcplog/formatter.go index fa9bd64a178a..9954b83bd6cb 100644 --- a/clients/pkg/promtail/targets/gcplog/formatter.go +++ b/clients/pkg/promtail/targets/gcplog/formatter.go @@ -60,7 +60,7 @@ func parseGCPLogsEntry(data []byte, other model.LabelSet, otherInternal labels.L // apply relabeling if len(relabelConfig) > 0 { - processed = relabel.Process(lbs.Labels(nil), relabelConfig...) + processed, _ = relabel.Process(lbs.Labels(nil), relabelConfig...) } else { processed = lbs.Labels(nil) } diff --git a/clients/pkg/promtail/targets/gelf/gelftarget.go b/clients/pkg/promtail/targets/gelf/gelftarget.go index b24e7d406c58..d1ff79f1838b 100644 --- a/clients/pkg/promtail/targets/gelf/gelftarget.go +++ b/clients/pkg/promtail/targets/gelf/gelftarget.go @@ -122,7 +122,7 @@ func (t *Target) handleMessage(msg *gelf.Message) { lb.Set("__gelf_message_version", msg.Version) lb.Set("__gelf_message_facility", msg.Facility) - processed := relabel.Process(lb.Labels(nil), t.relabelConfig...) 
+ processed, _ := relabel.Process(lb.Labels(nil), t.relabelConfig...) filtered := make(model.LabelSet) for _, lbl := range processed { diff --git a/clients/pkg/promtail/targets/heroku/target.go b/clients/pkg/promtail/targets/heroku/target.go index 8cc7bc25cd2f..38884a937bba 100644 --- a/clients/pkg/promtail/targets/heroku/target.go +++ b/clients/pkg/promtail/targets/heroku/target.go @@ -129,7 +129,7 @@ func (h *Target) drain(w http.ResponseWriter, r *http.Request) { lb.Set(lokiClient.ReservedLabelTenantID, tenantIDHeaderValue) } - processed := relabel.Process(lb.Labels(nil), h.relabelConfigs...) + processed, _ := relabel.Process(lb.Labels(nil), h.relabelConfigs...) // Start with the set of labels fixed in the configuration filtered := h.Labels().Clone() diff --git a/clients/pkg/promtail/targets/journal/journaltarget.go b/clients/pkg/promtail/targets/journal/journaltarget.go index c0b152d46529..8f6f76f2bcb0 100644 --- a/clients/pkg/promtail/targets/journal/journaltarget.go +++ b/clients/pkg/promtail/targets/journal/journaltarget.go @@ -304,7 +304,7 @@ func (t *JournalTarget) formatter(entry *sdjournal.JournalEntry) (string, error) entryLabels[string(k)] = string(v) } - processedLabels := relabel.Process(labels.FromMap(entryLabels), t.relabelConfig...) + processedLabels, _ := relabel.Process(labels.FromMap(entryLabels), t.relabelConfig...) processedLabelsMap := processedLabels.Map() labels := make(model.LabelSet, len(processedLabelsMap)) diff --git a/clients/pkg/promtail/targets/kafka/formatter.go b/clients/pkg/promtail/targets/kafka/formatter.go index 1ad1b5ad4019..b0f61e4332e3 100644 --- a/clients/pkg/promtail/targets/kafka/formatter.go +++ b/clients/pkg/promtail/targets/kafka/formatter.go @@ -14,7 +14,7 @@ func format(lbs labels.Labels, cfg []*relabel.Config) model.LabelSet { if len(lbs) == 0 { return nil } - processed := relabel.Process(lbs, cfg...) + processed, _ := relabel.Process(lbs, cfg...) labelOut := model.LabelSet(util.LabelsToMetric(processed)) for k := range labelOut { if strings.HasPrefix(string(k), "__") { diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go index 14ab07e9a386..dcd75b52e6d4 100644 --- a/clients/pkg/promtail/targets/lokipush/pushtarget.go +++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go @@ -89,6 +89,7 @@ func (t *PushTarget) run() error { t.server = srv t.server.HTTP.Path("/loki/api/v1/push").Methods("POST").Handler(http.HandlerFunc(t.handleLoki)) t.server.HTTP.Path("/promtail/api/v1/raw").Methods("POST").Handler(http.HandlerFunc(t.handlePlaintext)) + t.server.HTTP.Path("/ready").Methods("GET").Handler(http.HandlerFunc(t.ready)) go func() { err := srv.Run() @@ -126,8 +127,8 @@ func (t *PushTarget) handleLoki(w http.ResponseWriter, r *http.Request) { } // Apply relabeling - processed := relabel.Process(lb.Labels(nil), t.relabelConfig...) - if len(processed) == 0 { + processed, keep := relabel.Process(lb.Labels(nil), t.relabelConfig...) 
+ if !keep || len(processed) == 0 { w.WriteHeader(http.StatusNoContent) return } @@ -234,3 +235,11 @@ func (t *PushTarget) Stop() error { t.handler.Stop() return nil } + +// ready function serves the ready endpoint +func (t *PushTarget) ready(w http.ResponseWriter, r *http.Request) { + resp := "ready" + if _, err := w.Write([]byte(resp)); err != nil { + level.Error(t.logger).Log("msg", "failed to respond to ready endoint", "err", err) + } +} diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget_test.go b/clients/pkg/promtail/targets/lokipush/pushtarget_test.go index 815fc10400c6..1bcb397b78b3 100644 --- a/clients/pkg/promtail/targets/lokipush/pushtarget_test.go +++ b/clients/pkg/promtail/targets/lokipush/pushtarget_test.go @@ -4,6 +4,7 @@ import ( "bytes" "flag" "fmt" + "io" "net" "net/http" "os" @@ -200,3 +201,67 @@ func TestPlaintextPushTarget(t *testing.T) { _ = pt.Stop() } + +func TestReady(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + + //Create PushTarget + eh := fake.New(func() {}) + defer eh.Stop() + + // Get a randomly available port by open and closing a TCP socket + addr, err := net.ResolveTCPAddr("tcp", localhost+":0") + require.NoError(t, err) + l, err := net.ListenTCP("tcp", addr) + require.NoError(t, err) + port := l.Addr().(*net.TCPAddr).Port + err = l.Close() + require.NoError(t, err) + + // Adjust some of the defaults + defaults := server.Config{} + defaults.RegisterFlags(flag.NewFlagSet("empty", flag.ContinueOnError)) + defaults.HTTPListenAddress = localhost + defaults.HTTPListenPort = port + defaults.GRPCListenAddress = localhost + defaults.GRPCListenPort = 0 // Not testing GRPC, a random port will be assigned + + config := &scrapeconfig.PushTargetConfig{ + Server: defaults, + Labels: model.LabelSet{ + "pushserver": "pushserver2", + "keepme": "label", + }, + KeepTimestamp: true, + } + + pt, err := NewPushTarget(logger, eh, []*relabel.Config{}, "job3", config) + require.NoError(t, err) + + url := fmt.Sprintf("http://%s:%d/ready", localhost, port) + response, err := http.Get(url) + if err != nil { + require.NoError(t, err) + } + body, err := io.ReadAll(response.Body) + if err != nil { + require.NoError(t, err) + } + responseCode := fmt.Sprint(response.StatusCode) + responseBody := string(body) + + fmt.Println(responseBody) + wantedResponse := "ready" + if responseBody != wantedResponse { + t.Errorf("got the response %q, want %q", responseBody, wantedResponse) + } + wantedCode := "200" + if responseCode != wantedCode { + t.Errorf("Got the response code %q, want %q", responseCode, wantedCode) + } + + t.Cleanup(func() { + _ = pt.Stop() + }) +} diff --git a/clients/pkg/promtail/targets/syslog/syslogtarget.go b/clients/pkg/promtail/targets/syslog/syslogtarget.go index 28a020a8e12c..81fc772e71f0 100644 --- a/clients/pkg/promtail/targets/syslog/syslogtarget.go +++ b/clients/pkg/promtail/targets/syslog/syslogtarget.go @@ -144,7 +144,7 @@ func (t *SyslogTarget) handleMessage(connLabels labels.Labels, msg syslog.Messag } } - processed := relabel.Process(lb.Labels(nil), t.relabelConfig...) + processed, _ := relabel.Process(lb.Labels(nil), t.relabelConfig...) 
filtered := make(model.LabelSet) for _, lbl := range processed { diff --git a/clients/pkg/promtail/targets/testutils/testutils.go b/clients/pkg/promtail/targets/testutils/testutils.go index 41444769c28d..b88e87b323dd 100644 --- a/clients/pkg/promtail/targets/testutils/testutils.go +++ b/clients/pkg/promtail/targets/testutils/testutils.go @@ -5,8 +5,10 @@ import ( "time" ) +var randomGenerator *rand.Rand + func InitRandom() { - rand.Seed(time.Now().UnixNano()) + randomGenerator = rand.New(rand.NewSource(time.Now().UnixNano())) } var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") @@ -14,7 +16,7 @@ var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func RandName() string { b := make([]rune, 10) for i := range b { - b[i] = letters[rand.Intn(len(letters))] + b[i] = letters[randomGenerator.Intn(len(letters))] } return string(b) } diff --git a/clients/pkg/promtail/targets/windows/target.go b/clients/pkg/promtail/targets/windows/target.go index afc96dc3ad7f..5c2e4157c485 100644 --- a/clients/pkg/promtail/targets/windows/target.go +++ b/clients/pkg/promtail/targets/windows/target.go @@ -170,7 +170,7 @@ func (t *Target) renderEntries(events []win_eventlog.Event) []api.Entry { lbs.Set("computer", event.Computer) } // apply relabelings. - processed := relabel.Process(lbs.Labels(nil), t.relabelConfig...) + processed, _ := relabel.Process(lbs.Labels(nil), t.relabelConfig...) for _, lbl := range processed { if strings.HasPrefix(lbl.Name, "__") { diff --git a/clients/pkg/promtail/utils/entries.go b/clients/pkg/promtail/utils/entries.go new file mode 100644 index 000000000000..5cbb290c8ea1 --- /dev/null +++ b/clients/pkg/promtail/utils/entries.go @@ -0,0 +1,84 @@ +package utils + +import ( + "context" + "sync" + "time" + + "github.com/grafana/loki/clients/pkg/promtail/api" +) + +// FanoutEntryHandler implements api.EntryHandler, fanning out received entries to one or multiple channels. +type FanoutEntryHandler struct { + entries chan api.Entry + handlers []api.EntryHandler + + // using a single channel instead of a 1-valued wait group, to be able to wait on it + mainRoutineDone chan struct{} + once sync.Once + sendOnStopTimeout time.Duration + hardStop context.CancelFunc +} + +// NewFanoutEntryHandler creates a new FanoutEntryHandler. +func NewFanoutEntryHandler(sendTimeoutOnStop time.Duration, handlers ...api.EntryHandler) *FanoutEntryHandler { + ctx, cancel := context.WithCancel(context.Background()) + eh := &FanoutEntryHandler{ + entries: make(chan api.Entry), + handlers: handlers, + mainRoutineDone: make(chan struct{}), + sendOnStopTimeout: sendTimeoutOnStop, + hardStop: cancel, + } + eh.start(ctx) + return eh +} + +func (eh *FanoutEntryHandler) start(ctx context.Context) { + go func() { + defer func() { + close(eh.mainRoutineDone) + }() + + for e := range eh.entries { + // To prevent a single channel from blocking all others, we run each channel send in a separate go routine. + // This cause each entry to be sent in |eh.handlers| routines concurrently. When all finish, we know the entry + // has been fanned out properly, and we can read the next received entry. 
+ var entryWG sync.WaitGroup + entryWG.Add(len(eh.handlers)) + for _, handler := range eh.handlers { + go func(ctx context.Context, eh api.EntryHandler) { + defer entryWG.Done() + // just doing a single select block here, since these routines are short-lived + select { + case <-ctx.Done(): + case eh.Chan() <- e: + } + }(ctx, handler) + } + entryWG.Wait() + } + }() +} + +func (eh *FanoutEntryHandler) Chan() chan<- api.Entry { + return eh.entries +} + +// Stop only stops the channel FanoutEntryHandler exposes. It then waits for the entry being processed to be sent succesfully. +// If it times out, it hard stops all sending routines. +func (eh *FanoutEntryHandler) Stop() { + eh.once.Do(func() { + close(eh.entries) + }) + // after closing channel, if the main go-routine was sending entries, wait for them to be sent, or a timeout fires + select { + case <-eh.mainRoutineDone: + // graceful stop + return + case <-time.After(eh.sendOnStopTimeout): + } + // sending pending entries timed out, hard stop + eh.hardStop() + <-eh.mainRoutineDone +} diff --git a/clients/pkg/promtail/utils/entries_test.go b/clients/pkg/promtail/utils/entries_test.go new file mode 100644 index 000000000000..4b8b9ebb3377 --- /dev/null +++ b/clients/pkg/promtail/utils/entries_test.go @@ -0,0 +1,115 @@ +package utils + +import ( + "sync" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/pkg/logproto" +) + +func TestFanoutEntryHandler_SuccessfulFanout(t *testing.T) { + eh1 := newSavingEntryHandler() + eh2 := newSavingEntryHandler() + fanout := NewFanoutEntryHandler(time.Second*10, eh1, eh2) + + defer func() { + fanout.Stop() + eh2.Stop() + eh1.Stop() + }() + + var expectedLines = []string{ + "some line", + "some other line", + "some other other line", + } + + for _, line := range expectedLines { + fanout.Chan() <- api.Entry{ + Labels: model.LabelSet{ + "test": "fanout", + }, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: line, + }, + } + } + + require.Eventually(t, func() bool { + return len(eh1.Received) == len(expectedLines) && len(eh2.Received) == len(expectedLines) + }, time.Second*10, time.Second, "expected entries to be received by fanned out channels") +} + +type blockingEntryHanlder struct { + entries chan api.Entry +} + +func (b *blockingEntryHanlder) Chan() chan<- api.Entry { + return b.entries +} + +func (b *blockingEntryHanlder) Stop() { + close(b.entries) +} + +func TestFanoutEntryHandler_TimeoutWaitingForEntriesToBeSent(t *testing.T) { + eh1 := &blockingEntryHanlder{make(chan api.Entry)} + controlEH := newSavingEntryHandler() + fanout := NewFanoutEntryHandler(time.Second*2, eh1, controlEH) + + go func() { + fanout.Chan() <- api.Entry{ + Labels: model.LabelSet{ + "test": "fanout", + }, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: "holis", + }, + } + }() + + require.Eventually(t, func() bool { + return len(controlEH.Received) == 1 + }, time.Second*5, time.Second, "expected control entry handler to receive an entry") + + now := time.Now() + fanout.Stop() + require.InDelta(t, time.Second*2, time.Since(now), float64(time.Millisecond*100), "expected fanout entry handler to stop before") +} + +type savingEntryHandler struct { + entries chan api.Entry + Received []api.Entry + wg sync.WaitGroup +} + +func newSavingEntryHandler() *savingEntryHandler { + eh := &savingEntryHandler{ + entries: make(chan api.Entry), + Received: []api.Entry{}, + } + eh.wg.Add(1) + go func() 
{ + for e := range eh.entries { + eh.Received = append(eh.Received, e) + } + eh.wg.Done() + }() + return eh +} + +func (eh *savingEntryHandler) Chan() chan<- api.Entry { + return eh.entries +} + +func (eh *savingEntryHandler) Stop() { + close(eh.entries) + eh.wg.Wait() +} diff --git a/clients/pkg/promtail/wal/config.go b/clients/pkg/promtail/wal/config.go new file mode 100644 index 000000000000..e01090cb6795 --- /dev/null +++ b/clients/pkg/promtail/wal/config.go @@ -0,0 +1,34 @@ +package wal + +import ( + "time" +) + +const ( + defaultMaxSegmentAge = time.Hour +) + +// Config contains all WAL-related settings. +type Config struct { + // Whether WAL-support should be enabled. + // + // WAL support is a WIP. Do not enable in production setups until https://github.com/grafana/loki/issues/8197 + // is finished. + Enabled bool `yaml:"enabled"` + + // Path where the WAL is written to. + Dir string `yaml:"dir"` + + // MaxSegmentAge is threshold at which a WAL segment is considered old enough to be cleaned up. Default: 1h. + // + // Note that this functionality will likely be deprecated in favour of a programmatic cleanup mechanism. + MaxSegmentAge time.Duration `yaml:"cleanSegmentsOlderThan"` +} + +// UnmarshalYAML implement YAML Unmarshaler +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Apply defaults + c.MaxSegmentAge = defaultMaxSegmentAge + type plain Config + return unmarshal((*plain)(c)) +} diff --git a/clients/pkg/promtail/wal/reader.go b/clients/pkg/promtail/wal/reader.go new file mode 100644 index 000000000000..59555f2347e8 --- /dev/null +++ b/clients/pkg/promtail/wal/reader.go @@ -0,0 +1,60 @@ +package wal + +import ( + "fmt" + + "github.com/prometheus/common/model" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/pkg/ingester/wal" + "github.com/grafana/loki/pkg/util" + walUtils "github.com/grafana/loki/pkg/util/wal" +) + +// ReadWAL will read all entries in the WAL located under dir. 
Mainly used for testing +func ReadWAL(dir string) ([]api.Entry, error) { + reader, close, err := walUtils.NewWalReader(dir, -1) + if err != nil { + return nil, err + } + defer func() { close.Close() }() + + seenSeries := make(map[uint64]model.LabelSet) + seenEntries := []api.Entry{} + + for reader.Next() { + var walRec = wal.Record{} + bytes := reader.Record() + err = wal.DecodeRecord(bytes, &walRec) + if err != nil { + return nil, fmt.Errorf("error decoding wal record: %w", err) + } + + // first read series + for _, series := range walRec.Series { + if _, ok := seenSeries[uint64(series.Ref)]; !ok { + + seenSeries[uint64(series.Ref)] = util.MapToModelLabelSet(series.Labels.Map()) + } + } + + for _, entries := range walRec.RefEntries { + for _, entry := range entries.Entries { + labels, ok := seenSeries[uint64(entries.Ref)] + if !ok { + return nil, fmt.Errorf("found entry without matching series") + } + seenEntries = append(seenEntries, api.Entry{ + Labels: labels, + Entry: entry, + }) + } + } + + // reset entry + walRec.Series = walRec.Series[:] + walRec.RefEntries = walRec.RefEntries[:] + } + + return seenEntries, nil +} diff --git a/clients/pkg/promtail/wal/wal.go b/clients/pkg/promtail/wal/wal.go new file mode 100644 index 000000000000..3d54925cedb1 --- /dev/null +++ b/clients/pkg/promtail/wal/wal.go @@ -0,0 +1,106 @@ +package wal + +import ( + "fmt" + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb/wlog" + + "github.com/grafana/loki/pkg/ingester/wal" +) + +var ( + recordPool = wal.NewRecordPool() +) + +// WAL is an interface that allows us to abstract ourselves from Prometheus WAL implementation. +type WAL interface { + // Log marshals the records and writes it into the WAL. + Log(*wal.Record) error + + Delete() error + Sync() error + Dir() string + Close() + NextSegment() (int, error) +} + +type wrapper struct { + wal *wlog.WL + log log.Logger +} + +// New creates a new wrapper, instantiating the actual wlog.WL underneath. +func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (WAL, error) { + // TODO: We should fine-tune the WAL instantiated here to allow some buffering of written entries, but not written to disk + // yet. This will attest for the lack of buffering in the channel Writer exposes. + tsdbWAL, err := wlog.NewSize(log, registerer, cfg.Dir, wlog.DefaultSegmentSize, false) + if err != nil { + return nil, fmt.Errorf("failde to create tsdb WAL: %w", err) + } + return &wrapper{ + wal: tsdbWAL, + log: log, + }, nil +} + +// Close closes the underlying wal, flushing pending writes and closing the active segment. Safe to call more than once +func (w *wrapper) Close() { + // Avoid checking the error since it's safe to call Close more than once on wlog.WL + _ = w.wal.Close() +} + +func (w *wrapper) Delete() error { + err := w.wal.Close() + if err != nil { + level.Warn(w.log).Log("msg", "failed to close WAL", "err", err) + } + err = os.RemoveAll(w.wal.Dir()) + return err +} + +func (w *wrapper) Log(record *wal.Record) error { + if record == nil || (len(record.Series) == 0 && len(record.RefEntries) == 0) { + return nil + } + + buf := recordPool.GetBytes()[:0] + defer func() { + recordPool.PutBytes(buf) + }() + + // Always write series then entries. 
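+	// (Series records map a stream's Ref to its label set; entry records only carry the Ref, so a reader such as
+	// ReadWAL must have seen the series record before any entries that reference it.)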
+	if len(record.Series) > 0 {
+		buf = record.EncodeSeries(buf)
+		if err := w.wal.Log(buf); err != nil {
+			return err
+		}
+		buf = buf[:0]
+	}
+	if len(record.RefEntries) > 0 {
+		buf = record.EncodeEntries(wal.CurrentEntriesRec, buf)
+		if err := w.wal.Log(buf); err != nil {
+			return err
+		}
+
+	}
+	return nil
+}
+
+// Sync flushes changes to disk. Mainly to be used for testing.
+func (w *wrapper) Sync() error {
+	return w.wal.Sync()
+}
+
+// Dir returns the path to the WAL directory.
+func (w *wrapper) Dir() string {
+	return w.wal.Dir()
+}
+
+// NextSegment closes the current segment synchronously. Mainly used for testing.
+func (w *wrapper) NextSegment() (int, error) {
+	return w.wal.NextSegmentSync()
+}
diff --git a/clients/pkg/promtail/wal/writer.go b/clients/pkg/promtail/wal/writer.go
new file mode 100644
index 000000000000..b5eaa41c1229
--- /dev/null
+++ b/clients/pkg/promtail/wal/writer.go
@@ -0,0 +1,257 @@
+package wal
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb/chunks"
+	"github.com/prometheus/prometheus/tsdb/record"
+
+	"github.com/grafana/loki/clients/pkg/promtail/api"
+
+	"github.com/grafana/loki/pkg/ingester/wal"
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/util"
+)
+
+const (
+	minimumCleanSegmentsEvery = time.Second
+)
+
+// Writer implements api.EntryHandler, exposing a channel where scraping targets can write to. Reading from there, it
+// writes incoming entries to a WAL.
+// Also, since Writer is responsible for all operations that modify the WAL, it runs a routine for cleaning up
+// old segments.
+type Writer struct {
+	entries     chan api.Entry
+	log         log.Logger
+	wg          sync.WaitGroup
+	once        sync.Once
+	wal         WAL
+	entryWriter *entryWriter
+
+	reclaimedOldSegmentsSpaceCounter *prometheus.CounterVec
+
+	closeCleaner chan struct{}
+}
+
+// NewWriter creates a new Writer.
+func NewWriter(walCfg Config, logger log.Logger, reg prometheus.Registerer) (*Writer, error) {
+	// Start WAL
+	wl, err := New(Config{
+		Dir:     walCfg.Dir,
+		Enabled: true,
+	}, logger, reg)
+	if err != nil {
+		return nil, fmt.Errorf("error starting WAL: %w", err)
+	}
+
+	wrt := &Writer{
+		entries:      make(chan api.Entry),
+		log:          logger,
+		wg:           sync.WaitGroup{},
+		wal:          wl,
+		entryWriter:  newEntryWriter(),
+		closeCleaner: make(chan struct{}, 1),
+	}
+
+	wrt.reclaimedOldSegmentsSpaceCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "promtail",
+		Subsystem: "wal_writer",
+		Name:      "reclaimed_space",
+		Help:      "Number of bytes reclaimed from storage.",
+	}, []string{})
+
+	if reg != nil {
+		_ = reg.Register(wrt.reclaimedOldSegmentsSpaceCounter)
+	}
+
+	wrt.start(walCfg.MaxSegmentAge)
+	return wrt, nil
+}
+
+func (wrt *Writer) start(maxSegmentAge time.Duration) {
+	wrt.wg.Add(1)
+	// main WAL writer routine
+	go func() {
+		defer wrt.wg.Done()
+		for e := range wrt.entries {
+			wrt.entryWriter.WriteEntry(e, wrt.wal, wrt.log)
+		}
+	}()
+	// WAL cleanup routine that cleans old segments
+	wrt.wg.Add(1)
+	go func() {
+		defer wrt.wg.Done()
+		// By cleaning every 10th of the configured threshold for considering a segment old, we are allowing a maximum slip
+		// of 10%. If the configured time is 1 hour, that'd be 6 minutes.
+ triggerEvery := maxSegmentAge / 10 + if triggerEvery < minimumCleanSegmentsEvery { + triggerEvery = minimumCleanSegmentsEvery + } + trigger := time.NewTicker(triggerEvery) + for { + select { + case <-trigger.C: + level.Debug(wrt.log).Log("msg", "Running wal old segments cleanup") + if err := wrt.cleanSegments(maxSegmentAge); err != nil { + level.Error(wrt.log).Log("msg", "Error cleaning old segments", "err", err) + } + break + case <-wrt.closeCleaner: + trigger.Stop() + return + } + } + }() +} + +func (wrt *Writer) Chan() chan<- api.Entry { + return wrt.entries +} + +func (wrt *Writer) Stop() { + wrt.once.Do(func() { + close(wrt.entries) + }) + // close cleaner routine + wrt.closeCleaner <- struct{}{} + // Wait for routine to write to wal all pending entries + wrt.wg.Wait() + // Close WAL to finalize all pending writes + wrt.wal.Close() +} + +// cleanSegments will remove segments older than maxAge from the WAL directory. If there's just one segment, none will be +// deleted since it's likely there's active readers on it. In case there's multiple segments, each will be deleted if: +// - It's not the last (highest numbered) segment +// - It's last modified date is older than the max allowed age +func (wrt *Writer) cleanSegments(maxAge time.Duration) error { + maxModifiedAt := time.Now().Add(-maxAge) + walDir := wrt.wal.Dir() + segments, err := listSegments(walDir) + if err != nil { + return fmt.Errorf("error reading segments in wal directory: %w", err) + } + // Only clean if there's more than one segment + if len(segments) <= 1 { + return nil + } + // find the most recent, or head segment to avoid cleaning it up + lastSegment := -1 + for _, segment := range segments { + if lastSegment < segment.number { + lastSegment = segment.number + } + } + for _, segment := range segments { + if segment.lastModified.Before(maxModifiedAt) && segment.number != lastSegment { + // segment is older than allowed age, cleaning up + if err := os.Remove(filepath.Join(walDir, segment.name)); err != nil { + level.Error(wrt.log).Log("msg", "Error old wal segment", "err", err, "segmentNum", segment.number) + } + level.Debug(wrt.log).Log("msg", "Deleted old wal segment", "segmentNum", segment.number) + wrt.reclaimedOldSegmentsSpaceCounter.WithLabelValues().Add(float64(segment.size)) + } + } + return nil +} + +// entryWriter writes api.Entry to a WAL, keeping in memory a single Record object that's reused +// across every write. +type entryWriter struct { + reusableWALRecord *wal.Record +} + +// newEntryWriter creates a new entryWriter. +func newEntryWriter() *entryWriter { + return &entryWriter{ + reusableWALRecord: &wal.Record{ + RefEntries: make([]wal.RefEntries, 0, 1), + Series: make([]record.RefSeries, 0, 1), + }, + } +} + +// WriteEntry writes an api.Entry to a WAL. Note that since it's re-using the same Record object for every +// write, it first has to be reset, and then overwritten accordingly. Therefore, WriteEntry is not thread-safe. +func (ew *entryWriter) WriteEntry(entry api.Entry, wl WAL, logger log.Logger) { + // Reset wal record slices + ew.reusableWALRecord.RefEntries = ew.reusableWALRecord.RefEntries[:0] + ew.reusableWALRecord.Series = ew.reusableWALRecord.Series[:0] + + defer func() { + err := wl.Log(ew.reusableWALRecord) + if err != nil { + level.Error(logger).Log("msg", "Failed to write entry to wal", "err", err) + } + }() + + var fp uint64 + lbs := labels.FromMap(util.ModelLabelSetToMap(entry.Labels)) + sort.Sort(lbs) + fp, _ = lbs.HashWithoutLabels(nil, []string(nil)...) 
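+	// The label-set fingerprint doubles as the series Ref below, so the entry and the series record for the same
+	// stream share the same reference.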
+ + // Append the entry to an already existing stream (if any) + ew.reusableWALRecord.RefEntries = append(ew.reusableWALRecord.RefEntries, wal.RefEntries{ + Ref: chunks.HeadSeriesRef(fp), + Entries: []logproto.Entry{ + entry.Entry, + }, + }) + ew.reusableWALRecord.Series = append(ew.reusableWALRecord.Series, record.RefSeries{ + Ref: chunks.HeadSeriesRef(fp), + Labels: lbs, + }) +} + +type segmentRef struct { + name string + number int + size int64 + lastModified time.Time +} + +// listSegments list wal segments under the given directory, alongside with some file system information for each. +func listSegments(dir string) (refs []segmentRef, err error) { + files, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + // the following will attempt to get segments info in a best effort manner, omitting file if error + for _, f := range files { + fn := f.Name() + k, err := strconv.Atoi(fn) + if err != nil { + continue + } + fileInfo, err := f.Info() + if err != nil { + continue + } + refs = append(refs, segmentRef{ + name: fn, + number: k, + lastModified: fileInfo.ModTime(), + size: fileInfo.Size(), + }) + } + sort.Slice(refs, func(i, j int) bool { + return refs[i].number < refs[j].number + }) + for i := 0; i < len(refs)-1; i++ { + if refs[i].number+1 != refs[i+1].number { + return nil, fmt.Errorf("segments are not sequential") + } + } + return refs, nil +} diff --git a/clients/pkg/promtail/wal/writer_test.go b/clients/pkg/promtail/wal/writer_test.go new file mode 100644 index 000000000000..0c900e5302a1 --- /dev/null +++ b/clients/pkg/promtail/wal/writer_test.go @@ -0,0 +1,201 @@ +package wal + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/clients/pkg/promtail/api" + "github.com/grafana/loki/pkg/logproto" +) + +func TestWriter_EntriesAreWrittenToWAL(t *testing.T) { + logger := log.NewLogfmtLogger(os.Stdout) + dir := t.TempDir() + + writer, err := NewWriter(Config{ + Dir: dir, + Enabled: true, + MaxSegmentAge: time.Minute, + }, logger, prometheus.NewRegistry()) + require.NoError(t, err) + defer func() { + writer.Stop() + }() + // write entries to wal and sync + + var testLabels = model.LabelSet{ + "testing": "log", + } + var lines = []string{ + "some line", + "some other line", + "some other other line", + } + + for _, line := range lines { + writer.Chan() <- api.Entry{ + Labels: testLabels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: line, + }, + } + } + + // accessing the WAL inside, just for testing! 
+ require.NoError(t, writer.wal.Sync(), "failed to sync wal") + + // assert over WAL entries + readEntries, err := ReadWAL(dir) + require.NoError(t, err) + require.Len(t, readEntries, len(lines), "written lines and seen differ") + require.Equal(t, testLabels, readEntries[0].Labels) +} + +func TestWriter_OldSegmentsAreCleanedUp(t *testing.T) { + logger := level.NewFilter(log.NewLogfmtLogger(os.Stdout), level.AllowDebug()) + dir := t.TempDir() + + maxSegmentAge := time.Second * 2 + + writer, err := NewWriter(Config{ + Dir: dir, + Enabled: true, + MaxSegmentAge: maxSegmentAge, + }, logger, prometheus.NewRegistry()) + require.NoError(t, err) + defer func() { + writer.Stop() + }() + + // write entries to wal and sync + var testLabels = model.LabelSet{ + "testing": "log", + } + var lines = []string{ + "some line", + "some other line", + "some other other line", + } + + for _, line := range lines { + writer.Chan() <- api.Entry{ + Labels: testLabels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: line, + }, + } + } + + // accessing the WAL inside, just for testing! + require.NoError(t, writer.wal.Sync(), "failed to sync wal") + + // assert over WAL entries + readEntries, err := ReadWAL(dir) + require.NoError(t, err) + require.Len(t, readEntries, len(lines), "written lines and seen differ") + require.Equal(t, testLabels, readEntries[0].Labels) + + watchAndLogDirEntries(t, dir) + + // check segment is there + fileInfo, err := os.Stat(filepath.Join(dir, "00000000")) + require.NoError(t, err) + require.GreaterOrEqual(t, fileInfo.Size(), int64(0), "first segment size should be >= 0") + + // force close segment, so that one is eventually cleaned up + _, err = writer.wal.NextSegment() + require.NoError(t, err, "error closing current segment") + + // wait for segment to be cleaned + time.Sleep(maxSegmentAge * 2) + + watchAndLogDirEntries(t, dir) + + _, err = os.Stat(filepath.Join(dir, "00000000")) + require.Error(t, err) + require.ErrorIs(t, err, os.ErrNotExist, "expected file not exists error") + // Expect last, or "head" segment to still be alive + _, err = os.Stat(filepath.Join(dir, "00000001")) + require.NoError(t, err) +} + +func TestWriter_NoSegmentIsCleanedUpIfTheresOnlyOne(t *testing.T) { + logger := level.NewFilter(log.NewLogfmtLogger(os.Stdout), level.AllowDebug()) + dir := t.TempDir() + + maxSegmentAge := time.Second * 2 + + writer, err := NewWriter(Config{ + Dir: dir, + Enabled: true, + MaxSegmentAge: maxSegmentAge, + }, logger, prometheus.NewRegistry()) + require.NoError(t, err) + defer func() { + writer.Stop() + }() + + // write entries to wal and sync + var testLabels = model.LabelSet{ + "testing": "log", + } + var lines = []string{ + "some line", + } + + for _, line := range lines { + writer.Chan() <- api.Entry{ + Labels: testLabels, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: line, + }, + } + } + + // accessing the WAL inside, just for testing! 
+ require.NoError(t, writer.wal.Sync(), "failed to sync wal") + + // assert over WAL entries + readEntries, err := ReadWAL(dir) + require.NoError(t, err) + require.Len(t, readEntries, len(lines), "written lines and seen differ") + require.Equal(t, testLabels, readEntries[0].Labels) + + watchAndLogDirEntries(t, dir) + + // check segment is there + fileInfo, err := os.Stat(filepath.Join(dir, "00000000")) + require.NoError(t, err) + require.GreaterOrEqual(t, fileInfo.Size(), int64(0), "first segment size should be >= 0") + + // wait for segment to be cleaned + time.Sleep(maxSegmentAge * 2) + + watchAndLogDirEntries(t, dir) + + _, err = os.Stat(filepath.Join(dir, "00000000")) + require.NoError(t, err) +} + +func watchAndLogDirEntries(t *testing.T, path string) { + dirs, err := os.ReadDir(path) + if len(dirs) == 0 { + t.Log("no dirs found") + return + } + require.NoError(t, err) + for _, dir := range dirs { + t.Logf("dir entry found: %s", dir.Name()) + } +} diff --git a/cmd/logcli/Dockerfile b/cmd/logcli/Dockerfile index 117c0e15510b..c959e9bce593 100644 --- a/cmd/logcli/Dockerfile +++ b/cmd/logcli/Dockerfile @@ -1,10 +1,10 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false logcli -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --no-cache ca-certificates diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go index 5c71e413bf46..599ea4502a7a 100644 --- a/cmd/logcli/main.go +++ b/cmd/logcli/main.go @@ -76,7 +76,74 @@ data points between the start and end query time. This output is used to build graphs, similar to what is seen in the Grafana Explore graph view. If you are querying metrics and just want the most recent data point (like what is seen in the Grafana Explore table view), then you should use -the "instant-query" command instead.`) +the "instant-query" command instead. + +Parallelization: + +You can download an unlimited number of logs in parallel, there are a few +flags which control this behaviour: + + --parallel-duration + --parallel-max-workers + --part-path-prefix + --overwrite-completed-parts + --merge-parts + --keep-parts + +Refer to the help for each flag for details about what each of them do. + +Example: + + logcli query + --timezone=UTC + --from="2021-01-19T10:00:00Z" + --to="2021-01-19T20:00:00Z" + --output=jsonl + --parallel-duration="15m" + --parallel-max-workers="4" + --part-path-prefix="/tmp/my_query" + --merge-parts + 'my-query' + +This example will create a queue of jobs to execute, each being 15 minutes in +duration. In this case, that means, for the 10-hour total duration, there will +be forty 15-minute jobs. The --limit flag is ignored. + +It will start four workers, and they will each take a job to work on from the +queue until all the jobs have been completed. + +Each job will save a "part" file to the location specified by the --part-path-prefix. +Different prefixes can be used to run multiple queries at the same time. +The timestamp of the start and end of the part is in the file name. +While the part is being downloaded, the filename will end in ".part", when it +is complete, the file will be renamed to remove this ".part" extension. +By default, if a completed part file is found, that part will not be downloaded +again. This can be overridden with the --overwrite-completed-parts flag. + +Part file example using the previous command, adding --keep-parts so they are +not deleted: + +Since we don't have the --forward flag, the parts will be downloaded in reverse. 
+Two of the workers have finished their jobs (last two files), and have picked +up the next jobs in the queue. +Running ls, this is what we should expect to see. + +$ ls -1 /tmp/my_query* +/tmp/my_query_20210119T183000_20210119T184500.part.tmp +/tmp/my_query_20210119T184500_20210119T190000.part.tmp +/tmp/my_query_20210119T190000_20210119T191500.part.tmp +/tmp/my_query_20210119T191500_20210119T193000.part.tmp +/tmp/my_query_20210119T193000_20210119T194500.part +/tmp/my_query_20210119T194500_20210119T200000.part + +If you do not specify the --merge-parts flag, the part files will be +downloaded, and logcli will exit, and you can process the files as you wish. +With the flag specified, the part files will be read in order, and the output +printed to the terminal. The lines will be printed as soon as the next part is +complete, you don't have to wait for all the parts to download before getting +output. The --merge-parts flag will remove the part files when it is done +reading each of them. To change this, you can use the --keep-parts flag, and +the part files will not be removed.`) rangeQuery = newQuery(false, queryCmd) tail = queryCmd.Flag("tail", "Tail the logs").Short('t').Default("false").Bool() follow = queryCmd.Flag("follow", "Alias for --tail").Short('f').Default("false").Bool() @@ -170,7 +237,7 @@ func main() { } // `--limit` doesn't make sense when using `--stdin` flag. - rangeQuery.Limit = math.MaxInt // TODO(kavi): is it a good idea? + rangeQuery.Limit = 0 } switch cmd { @@ -193,8 +260,12 @@ func main() { if *tail || *follow { rangeQuery.TailQuery(time.Duration(*delayFor)*time.Second, queryClient, out) - } else { + } else if rangeQuery.ParallelMaxWorkers == 1 { rangeQuery.DoQuery(queryClient, out, *statistics) + } else { + // `--limit` doesn't make sense when using parallelism. + rangeQuery.Limit = 0 + rangeQuery.DoQueryParallel(queryClient, out, *statistics) } case instantQueryCmd.FullCommand(): location, err := time.LoadLocation(*timezone) @@ -357,12 +428,18 @@ func newQuery(instant bool, cmd *kingpin.CmdClause) *query.Query { q.Start = mustParse(from, defaultStart) q.End = mustParse(to, defaultEnd) + + if q.ParallelMaxWorkers < 1 { + log.Println("parallel-max-workers must be greater than 0, defaulting to 1.") + q.ParallelMaxWorkers = 1 + } } q.Quiet = *quiet + return nil }) - cmd.Flag("limit", "Limit on number of entries to print.").Default("30").IntVar(&q.Limit) + cmd.Flag("limit", "Limit on number of entries to print. Setting it to 0 will fetch all entries.").Default("30").IntVar(&q.Limit) if instant { cmd.Arg("query", "eg 'rate({foo=\"bar\"} |~ \".*error.*\" [5m])'").Required().StringVar(&q.QueryString) cmd.Flag("now", "Time at which to execute the instant query.").StringVar(&now) @@ -374,7 +451,12 @@ func newQuery(instant bool, cmd *kingpin.CmdClause) *query.Query { cmd.Flag("step", "Query resolution step width, for metric queries. Evaluate the query at the specified step over the time range.").DurationVar(&q.Step) cmd.Flag("interval", "Query interval, for log queries. Return entries at the specified interval, ignoring those between. **This parameter is experimental, please see Issue 1779**").DurationVar(&q.Interval) cmd.Flag("batch", "Query batch size to use until 'limit' is reached").Default("1000").IntVar(&q.BatchSize) - + cmd.Flag("parallel-duration", "Split the range into jobs of this length to download the logs in parallel. This will result in the logs being out of order. 
Use --part-path-prefix to create a file per job to maintain ordering.").Default("1h").DurationVar(&q.ParallelDuration) + cmd.Flag("parallel-max-workers", "Max number of workers to start up for parallel jobs. A value of 1 will not create any parallel workers. When using parallel workers, limit is ignored.").Default("1").IntVar(&q.ParallelMaxWorkers) + cmd.Flag("part-path-prefix", "When set, each server response will be saved to a file with this prefix. Creates files in the format: 'prefix-utc_start-utc_end.part'. Intended to be used with the parallel-* flags so that you can combine the files to maintain ordering based on the filename. Default is to write to stdout.").StringVar(&q.PartPathPrefix) + cmd.Flag("overwrite-completed-parts", "Overwrites completed part files. This will download the range again, and replace the original completed part file. Default will skip a range if it's part file is already downloaded.").Default("false").BoolVar(&q.OverwriteCompleted) + cmd.Flag("merge-parts", "Reads the part files in order and writes the output to stdout. Original part files will be deleted with this option.").Default("false").BoolVar(&q.MergeParts) + cmd.Flag("keep-parts", "Overrides the default behaviour of --merge-parts which will delete the part files once all the files have been read. This option will keep the part files.").Default("false").BoolVar(&q.KeepParts) } cmd.Flag("forward", "Scan forwards through logs.").Default("false").BoolVar(&q.Forward) diff --git a/cmd/logql-analyzer/Dockerfile b/cmd/logql-analyzer/Dockerfile index db6061ff1517..a9593a51f21f 100644 --- a/cmd/logql-analyzer/Dockerfile +++ b/cmd/logql-analyzer/Dockerfile @@ -1,13 +1,13 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && CGO_ENABLED=0 go build ./cmd/logql-analyzer/ -FROM alpine:3.15.4 +FROM alpine:3.16.4 RUN apk add --no-cache ca-certificates COPY --from=build /src/loki/logql-analyzer /usr/bin/logql-analyzer -ENTRYPOINT [ "/usr/bin/logql-analyzer" ] \ No newline at end of file +ENTRYPOINT [ "/usr/bin/logql-analyzer" ] diff --git a/cmd/loki-canary/Dockerfile b/cmd/loki-canary/Dockerfile index 9355ef305f70..8217f356c54f 100644 --- a/cmd/loki-canary/Dockerfile +++ b/cmd/loki-canary/Dockerfile @@ -1,10 +1,10 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki-canary/Dockerfile.cross b/cmd/loki-canary/Dockerfile.cross index 1d5185abd3e2..305a8d1a3b3b 100644 --- a/cmd/loki-canary/Dockerfile.cross +++ b/cmd/loki-canary/Dockerfile.cross @@ -1,8 +1,8 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . -FROM golang:1.19.5-alpine as goenv +FROM golang:1.20.1-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm @@ -12,7 +12,7 @@ COPY . 
/src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-canary -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki-canary/loki-canary /usr/bin/loki-canary ENTRYPOINT [ "/usr/bin/loki-canary" ] diff --git a/cmd/loki/Dockerfile b/cmd/loki/Dockerfile index 82c836cfa87b..a063fafcee79 100644 --- a/cmd/loki/Dockerfile +++ b/cmd/loki/Dockerfile @@ -1,10 +1,10 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --no-cache ca-certificates libcap @@ -17,9 +17,6 @@ RUN mkdir -p /loki/rules && \ mkdir -p /loki/rules-temp && \ chown -R loki:loki /etc/loki /loki -# See https://github.com/grafana/loki/issues/1928 -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf - USER 10001 EXPOSE 3100 ENTRYPOINT [ "/usr/bin/loki" ] diff --git a/cmd/loki/Dockerfile.cross b/cmd/loki/Dockerfile.cross index 9168dbab5ea6..442c25cd760d 100644 --- a/cmd/loki/Dockerfile.cross +++ b/cmd/loki/Dockerfile.cross @@ -1,8 +1,8 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile . -FROM golang:1.19.5-alpine as goenv +FROM golang:1.20.1-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm @@ -12,7 +12,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --no-cache ca-certificates @@ -24,9 +24,6 @@ RUN addgroup -g 10001 -S loki && \ RUN mkdir -p /loki && \ chown -R loki:loki /etc/loki /loki -# See https://github.com/grafana/loki/issues/1928 -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf - USER 10001 EXPOSE 3100 ENTRYPOINT [ "/usr/bin/loki" ] diff --git a/cmd/loki/Dockerfile.debug b/cmd/loki/Dockerfile.debug index e8c91e42086a..eae2b64bc6f3 100644 --- a/cmd/loki/Dockerfile.debug +++ b/cmd/loki/Dockerfile.debug @@ -1,13 +1,13 @@ # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/loki -f cmd/loki/Dockerfile.debug . -FROM grafana/loki-build-image:0.27.1 as build +FROM grafana/loki-build-image:0.28.1 as build ARG GOARCH="amd64" COPY . 
/src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-debug -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/loki/loki-debug /usr/bin/loki-debug COPY --from=build /usr/bin/dlv /usr/bin/dlv diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml index 5d2ea845f27d..68c3c9fb0890 100644 --- a/cmd/loki/loki-local-config.yaml +++ b/cmd/loki/loki-local-config.yaml @@ -5,6 +5,7 @@ server: grpc_listen_port: 9096 common: + instance_addr: 127.0.0.1 path_prefix: /tmp/loki storage: filesystem: @@ -12,7 +13,6 @@ common: rules_directory: /tmp/loki/rules replication_factor: 1 ring: - instance_addr: 127.0.0.1 kvstore: store: inmemory diff --git a/cmd/migrate/Dockerfile b/cmd/migrate/Dockerfile index a5e9d4a31f23..3e04d019f740 100644 --- a/cmd/migrate/Dockerfile +++ b/cmd/migrate/Dockerfile @@ -1,9 +1,9 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false migrate -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/migrate/migrate /usr/bin/migrate #ENTRYPOINT [ "/usr/bin/migrate" ] diff --git a/cmd/querytee/Dockerfile b/cmd/querytee/Dockerfile index e1e46d85fd5c..20beb06da29a 100644 --- a/cmd/querytee/Dockerfile +++ b/cmd/querytee/Dockerfile @@ -1,10 +1,10 @@ -FROM golang:1.19.5 as build +FROM golang:1.20.1 as build COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/cmd/querytee/Dockerfile.cross b/cmd/querytee/Dockerfile.cross index 1992241475e9..b3d29396657d 100644 --- a/cmd/querytee/Dockerfile.cross +++ b/cmd/querytee/Dockerfile.cross @@ -1,8 +1,8 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 # Directories in this file are referenced from the root of the project not this folder # This file is intended to be called from the root like so: # docker build -t grafana/promtail -f cmd/promtail/Dockerfile . -FROM golang:1.19.5-alpine as goenv +FROM golang:1.20.1-alpine as goenv RUN go env GOARCH > /goarch && \ go env GOARM > /goarm @@ -12,7 +12,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && GOARCH=$(cat /goarch) GOARM=$(cat /goarm) make BUILD_IN_CONTAINER=false loki-querytee -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates COPY --from=build /src/loki/cmd/querytee/querytee /usr/bin/querytee ENTRYPOINT [ "/usr/bin/querytee" ] diff --git a/docs/README.md b/docs/README.md index 5201cda98a28..4e964e0cabca 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,3 +4,5 @@ Like Prometheus, but for logs!

Please see the [Documentation Site](https://grafana.com/docs/loki/latest/) The files in this directory are used to generate it but consequently the links don't work in Github. + +To contribute to these docs, refer to the `Documentation` section in [CONTRIBUTING.md](../CONTRIBUTING.md). \ No newline at end of file diff --git a/docs/sources/clients/lambda-promtail/_index.md b/docs/sources/clients/lambda-promtail/_index.md index 6b2fed605f2f..fb37e247d7ef 100644 --- a/docs/sources/clients/lambda-promtail/_index.md +++ b/docs/sources/clients/lambda-promtail/_index.md @@ -109,6 +109,12 @@ This workflow allows ingesting AWS loadbalancer logs stored on S3 to Loki. Cloudfront [real-time logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html) can be sent to a Kinesis data stream. The data stream can be mapped to be an [event source](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html) for lambda-promtail to deliver the logs to Loki. +### Triggering Lambda-Promtail via SQS +For AWS services supporting sending messages to SQS (for example, S3 with an S3 Notification to SQS), events can be processed through an [SQS queue using a lambda trigger](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) instead of directly configuring the source service to trigger lambda. Lambda-promtail will retrieve the nested events from the SQS messages' body and process them as if them came directly from the source service. + +### On-Failure log recovery using SQS +Triggering lambda-promtail through SQS allows handling on-failure recovery of the logs using a secondary SQS queue as a dead-letter-queue (DLQ). You can configure lambda so that unsuccessfully processed messages will be sent to the DLQ. After fixing the issue, operators will be able to reprocess the messages by sending back messages from the DLQ to the source queue using the [SQS DLQ redrive](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-dead-letter-queue-redrive.html) feature. + ## Propagated Labels Incoming logs can have seven special labels assigned to them which can be used in [relabeling]({{}}) or later stages in a Promtail [pipeline]({{}}): diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md index 13cf60e6dbe0..e3fda0d9b0c9 100644 --- a/docs/sources/clients/promtail/configuration.md +++ b/docs/sources/clients/promtail/configuration.md @@ -877,6 +877,8 @@ Note the `server` configuration is the same as [server](#server). Promtail also exposes a second endpoint on `/promtail/api/v1/raw` which expects newline-delimited log lines. This can be used to send NDJSON or plaintext logs. +The readiness of the loki_push_api server can be checked using the endpoint `/ready`. + ```yaml # The push server configuration options [server: ] diff --git a/docs/sources/clients/promtail/pipelines.md b/docs/sources/clients/promtail/pipelines.md index 1fb80ea4ae3d..cc7f82eaab87 100644 --- a/docs/sources/clients/promtail/pipelines.md +++ b/docs/sources/clients/promtail/pipelines.md @@ -204,6 +204,7 @@ Parsing stages: - [cri]({{}}): Extract data by parsing the log line using the standard CRI format. - [regex]({{}}): Extract data using a regular expression. - [json]({{}}): Extract data by parsing the log line as JSON. + - [eventlogmessage]({{}}): Extract data by parsing the Message field from the Windows Event Log. 
Transform stages: diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md index f6017e21ff7d..334b97b61257 100644 --- a/docs/sources/clients/promtail/scraping.md +++ b/docs/sources/clients/promtail/scraping.md @@ -194,6 +194,9 @@ resuming the target without skipping logs. Read the [configuration]({{< relref "configuration#windows_events" >}}) section for more information. +See the [eventlogmessage]({{}}) stage for extracting +data from the `message`. + ## GCP Log scraping Promtail supports scraping cloud resource logs such as GCS bucket logs, load balancer logs, and Kubernetes cluster logs from GCP. diff --git a/docs/sources/clients/promtail/stages/_index.md b/docs/sources/clients/promtail/stages/_index.md index bdb2c1e488ed..03b9ef19b5dd 100644 --- a/docs/sources/clients/promtail/stages/_index.md +++ b/docs/sources/clients/promtail/stages/_index.md @@ -16,6 +16,7 @@ Parsing stages: - [logfmt]({{}}): Extract data by parsing the log line as logfmt. - [replace]({{}}): Replace data using a regular expression. - [multiline]({{}}): Merge multiple lines into a multiline block. + - [geoip]({{}}): Extract geoip data from extracted labels. Transform stages: diff --git a/docs/sources/clients/promtail/stages/eventlogmessage.md b/docs/sources/clients/promtail/stages/eventlogmessage.md new file mode 100644 index 000000000000..2e08243cac3e --- /dev/null +++ b/docs/sources/clients/promtail/stages/eventlogmessage.md @@ -0,0 +1,68 @@ +--- +title: eventlogmessage +description: eventlogmessage stage +--- +# eventlogmessage + +The `eventlogmessage` stage is a parsing stage that extracts data from the Message string that appears in the Windows Event Log. + +## Schema + +```yaml +eventlogmessage: + # Name from extracted data to parse, defaulting to the name + # used by the windows_events scraper + [source: | default = message] + + # If previously extracted data exists for a key that occurs + # in the Message, when true, the previous value will be + # overwriten by the value in the Message. Otherwise, + # '_extracted' will be appended to the key that is used for + # the value in the Message. + [overwrite_existing: | default = false] + + # When true, keys extracted from the Message that are not + # valid labels will be dropped, otherwise they will be + # automatically converted into valid labels replacing invalid + # characters with underscores + [drop_invalid_labels: | default = false] +``` + +The extracted data can hold non-string values and this stage does not do any +type conversions; downstream stages will need to perform correct type +conversion of these values as necessary. Please refer to the +[the `template` stage]({{}}) for how to do this. 
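+As a minimal, hypothetical sketch of such a downstream conversion (the `Image` key is only an assumed example of a value extracted from the Message), a `template` stage placed after `eventlogmessage` could normalize one of the extracted values before it is used further down the pipeline:
+
+```yaml
+- eventlogmessage:
+    source: message
+- template:
+    # 'Image' is an assumed extracted key produced by the stage above
+    source: Image
+    template: '{{ ToLower .Value }}'
+```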
+ +## Example combined with json + +For the given pipeline: + +```yaml +- json: + expressions: + message: + Overwritten: +- eventlogmessage: + source: message + overwrite_existing: true +``` + +Given the following log line: + +``` +{"event_id": 1, "Overwritten": "old", "message": "Message type:\r\nOverwritten: new\r\nImage: C:\\Users\\User\\promtail.exe"} +``` + +The first stage would create the following key-value pairs in the set of +extracted data: + +- `message`: `Message type:\r\nOverwritten: new\r\nImage: C:\Users\User\promtail.exe` +- `Overwritten`: `old` + +The second stage will parse the value of `message` from the extracted data +and append/overwrite the following key-value pairs to the set of extracted data: + +- `Image`: `C:\\Users\\User\\promtail.exe` +- `Message_type`: (empty string) +- `Overwritten`: `new` + diff --git a/docs/sources/clients/promtail/stages/geoip.md b/docs/sources/clients/promtail/stages/geoip.md new file mode 100644 index 000000000000..393d5ad9d358 --- /dev/null +++ b/docs/sources/clients/promtail/stages/geoip.md @@ -0,0 +1,130 @@ +--- +title: geoip +description: geoip stage +--- +# geoip + +The `geoip` stage is a parsing stage that reads an ip address and +populates the labelset with geoip fields. Maxmind's GeoIP2 databse is used for the lookup. + +Populated fields for City db: + +- geoip_city_name +- geoip_country_name +- geoip_continet_name +- geoip_continent_code +- geoip_location_latitude +- geoip_location_longitude +- geoip_postal_code +- geoip_timezone +- geoip_subdivision_name +- geoip_subdivision_code + +Populated fields for ASN (Autonomous System Number) db: + +- geoip_autonomous_system_number +- geoip_autonomous_system_organization + +## Schema + +```yaml +geoip: + # Path to the Maxmind DB file + [db: ] + + # IP from extracted data to parse. + [source: ] + + # Maxmind DB type. Allowed values are "city", "asn" + [db_type: ] +``` + +## GeoIP with City database example + +For the given pipeline + +```yaml +- regex: + expression: "^(?P\S+) .*" +- geoip: + db: "/path/to/GeoIP2-City.mmdb" + source: "ip" + db_type: "city" +``` + +And the log line: + +``` +"34.120.177.193 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6" +``` + +The `regex` stage parses the log line and `ip` is extracted. Then the extracted `ip` value is given as `source` to `geoip` stage. The `geoip` stage performs a lookup on the `ip` and populates the following labels: + +- `geoip_city_name`: `Kansas City` +- `geoip_country_name`: `United States` +- `geoip_continet_name`: `North America` +- `geoip_continent_code`: `NA` +- `geoip_location_latitude`: `"39.1027` +- `geoip_location_longitude`: `-94.5778` +- `geoip_postal_code`: `64184` +- `geoip_timezone`: `America/Chicago` +- `geoip_subdivision_name`: `Missouri` +- `geoip_subdivision_code`: `MO` + +If only a subset of these labels are required, you can chain the above pipeline with the `labeldrop` or `labelallow` stage. + +### labelallow example + +```yaml +- regex: + expression: "^(?P\S+) .*" +- geoip: + db: "/path/to/GeoCity.mmdb" + source: "ip" + db_type: "city" +- labelallow: + - geoip_city_name + - geoip_country_name + - geoip_location_latitude + - geoip_location_longitude +``` + +Only the labels listed under `labelallow` will be sent to Loki. 
+ +### labeldrop example + +```yaml +- regex: + expression: "^(?P\S+) .*" +- geoip: + db: "/path/to/GeoCity.mmdb" + source: "ip" + db_type: "city" +- labeldrop: + - geoip_postal_code + - geoip_subdivision_code +``` + +All the labels except the ones listed under `labeldrop` will be sent to Loki. + +## GeoIP with ASN (Autonomous System Number) database example + +```yaml +- regex: + expression: "^(?P\S+) .*" +- geoip: + db: "/path/to/GeoIP2-ASN.mmdb" + source: "ip" + db_type: "asn" +``` + +And the log line: + +``` +"34.120.177.193 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6" +``` + +The `regex` stage parses the log line and `ip` is extracted. Then the extracted `ip` value is given as `source` to `geoip` stage. The `geoip` stage performs a lookup on the `ip` and populates the following labels: + +- `geoip_autonomous_system_number`: `396982` +- `geoip_autonomous_system_organization`: `GOOGLE-CLOUD-PLATFORM` diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 476befbdd9ec..5dfc66145d7a 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -778,6 +778,10 @@ storage: # The CLI flags prefix for this block configuration is: ruler.storage [azure: ] + # Configures backend rule storage for AlibabaCloud Object Storage (OSS). + # The CLI flags prefix for this block configuration is: ruler + [alibabacloud: ] + # Configures backend rule storage for GCS. # The CLI flags prefix for this block configuration is: ruler.storage [gcs: ] @@ -1474,6 +1478,11 @@ ring: The `storage_config` block configures one of many possible stores for both the index and chunks. Which configuration to be picked should be defined in schema_config block. ```yaml +# The alibabacloud_storage_config block configures the connection to Alibaba +# Cloud Storage object storage backend. +# The CLI flags prefix for this block configuration is: common +[alibabacloud: ] + # The aws_storage_config block configures the connection to dynamoDB and S3 # object storage. Either one of them or both can be configured. [aws: ] @@ -2807,6 +2816,10 @@ storage: # The CLI flags prefix for this block configuration is: common.storage [azure: ] + # The alibabacloud_storage_config block configures the connection to Alibaba + # Cloud Storage object storage backend. + [alibabacloud: ] + # The bos_storage_config block configures the connection to Baidu Object # Storage (BOS) object storage backend. # The CLI flags prefix for this block configuration is: common.storage @@ -3609,6 +3622,11 @@ dynamodb: # CLI flag: -dynamodb.max-retries [max_retries: | default = 20] + # KMS key used for encrypting DynamoDB items. DynamoDB will use an Amazon + # owned KMS key if not provided. + # CLI flag: -dynamodb.kms-key-id + [kms_key_id: | default = ""] + # S3 endpoint URL with escaped Key and Secret encoded. If only region is # specified as a host, proper endpoint will be deduced. Use # inmemory:/// to use a mock in-memory implementation. @@ -3640,6 +3658,10 @@ dynamodb: # CLI flag: -s3.secret-access-key [secret_access_key: | default = ""] +# AWS Session Token +# CLI flag: -s3.session-token +[session_token: | default = ""] + # Disable https on s3 connection. # CLI flag: -s3.insecure [insecure: | default = false] @@ -3673,6 +3695,12 @@ http_config: # CLI flag: -s3.signature-version [signature_version: | default = "v4"] +# The S3 storage class which objects will use. 
Supported values are: GLACIER, +# DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, +# REDUCED_REDUNDANCY, STANDARD, STANDARD_IA. +# CLI flag: -s3.storage-class +[storage_class: | default = "STANDARD"] + sse: # Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3. # CLI flag: -s3.sse.type @@ -3796,6 +3824,33 @@ The `azure_storage_config` block configures the connection to Azure object stora [max_retry_delay: | default = 500ms] ``` +### alibabacloud_storage_config + +The `alibabacloud_storage_config` block configures the connection to Alibaba Cloud Storage object storage backend. The supported CLI flags `` used to reference this configuration block are: + +- `common` +- `ruler` + +  + +```yaml +# Name of OSS bucket. +# CLI flag: -common.storage.oss.bucketname +[bucket: | default = ""] + +# oss Endpoint to connect to. +# CLI flag: -common.storage.oss.endpoint +[endpoint: | default = ""] + +# alibabacloud Access Key ID +# CLI flag: -common.storage.oss.access-key-id +[access_key_id: | default = ""] + +# alibabacloud Secret Access Key +# CLI flag: -common.storage.oss.secret-access-key +[secret_access_key: | default = ""] +``` + ### gcs_storage_config The `gcs_storage_config` block configures the connection to Google Cloud Storage object storage backend. The supported CLI flags `` used to reference this configuration block are: @@ -3877,6 +3932,10 @@ The `s3_storage_config` block configures the connection to Amazon S3 object stor # CLI flag: -.storage.s3.secret-access-key [secret_access_key: | default = ""] +# AWS Session Token +# CLI flag: -.storage.s3.session-token +[session_token: | default = ""] + # Disable https on s3 connection. # CLI flag: -.storage.s3.insecure [insecure: | default = false] @@ -3910,6 +3969,12 @@ http_config: # CLI flag: -.storage.s3.signature-version [signature_version: | default = "v4"] +# The S3 storage class which objects will use. Supported values are: GLACIER, +# DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, +# REDUCED_REDUNDANCY, STANDARD, STANDARD_IA. +# CLI flag: -.storage.s3.storage-class +[storage_class: | default = "STANDARD"] + sse: # Enable AWS Server Side Encryption. Supported values: SSE-KMS, SSE-S3. # CLI flag: -.storage.s3.sse.type @@ -3984,6 +4049,10 @@ The `swift_storage_config` block configures the connection to OpenStack Object S # CLI flag: -.swift.auth-url [auth_url: | default = ""] +# Set this to true to use the internal OpenStack Swift endpoint URL +# CLI flag: -.swift.internal +[internal: | default = false] + # OpenStack Swift username. 
# CLI flag: -.swift.username [username: | default = ""] @@ -4087,6 +4156,8 @@ Named store from this example can be used by setting object_store to store-1 in [gcs: ] +[alibabacloud: ] + [swift: ] ``` diff --git a/docs/sources/configuration/examples.md b/docs/sources/configuration/examples.md index 6410a870c8e5..4a2951cf6c16 100644 --- a/docs/sources/configuration/examples.md +++ b/docs/sources/configuration/examples.md @@ -4,9 +4,35 @@ description: Loki Configuration Examples --- # Examples +## alibaba-cloud-storage-config.yaml + +```yaml + +# This partial configuration uses Alibaba for chunk storage + +schema_config: + configs: + - from: 2020-05-15 + object_store: alibabacloud + schema: v11 + index: + prefix: loki_index_ + period: 168h + +storage_config: + alibabacloud: + bucket: + endpoint: + access_key_id: + secret_access_key: + +``` + + ## 1-Local-Configuration-Example.yaml ```yaml + auth_enabled: false server: @@ -35,6 +61,7 @@ schema_config: ## 2-S3-Cluster-Example.yaml ```yaml + # This is a complete configuration to deploy Loki backed by a s3-Comaptible API # like MinIO for storage. Loki components will use memberlist ring to shard and # the index will be shipped to storage via boltdb-shipper. @@ -86,6 +113,7 @@ compactor: ## 3-S3-Without-Credentials-Snippet.yaml ```yaml + # If you don't wish to hard-code S3 credentials you can also configure an EC2 # instance role by changing the `storage_config` section @@ -109,6 +137,7 @@ storage_config: ## 4-BOS-Example.yaml ```yaml + schema_config: configs: - from: 2020-05-15 @@ -140,6 +169,7 @@ compactor: ## 5-S3-And-DynamoDB-Snippet.yaml ```yaml + # This partial configuration uses S3 for chunk storage and uses DynamoDB for index storage schema_config: @@ -162,6 +192,7 @@ storage_config: ## 6-Cassandra-Snippet.yaml ```yaml + # This is a partial config that uses the local filesystem for chunk storage and Cassandra for index storage schema_config: @@ -191,6 +222,7 @@ storage_config: ## 7-Schema-Migration-Snippet.yaml ```yaml + schema_config: configs: # Starting from 2018-04-15 Loki should store indexes on Cassandra @@ -219,6 +251,7 @@ schema_config: ## 8-GCS-Snippet.yaml ```yaml + # This partial configuration uses GCS for chunk storage and uses BigTable for index storage schema_config: @@ -244,6 +277,7 @@ storage_config: ## 9-Expanded-S3-Snippet.yaml ```yaml + # S3 configuration supports an expanded configuration. # Either an `s3` endpoint URL can be used, or an expanded configuration can be used. 
@@ -272,3 +306,30 @@ storage_config: ``` + +## 10-S3-And-DynamoDB-With-KMS-Snippet.yaml + +```yaml + +# This partial configuration uses S3 for chunk storage and uses DynamoDB for index storage and a KMS CMK for encryption + +schema_config: + configs: + - from: 2020-05-15 + store: aws + object_store: s3 + schema: v11 + index: + prefix: loki_ +storage_config: + aws: + s3: s3://access_key:secret_access_key@region/bucket_name + sse_encryption: true + sse: + type: SSE-KMS + kms_key_id: 1234abcd-12ab-34cd-56ef-1234567890ab + dynamodb: + dynamodb_url: dynamodb://access_key:secret_access_key@region + kms_key_id: 0987dcba-09fe-87dc-65ba-ab0987654321 +``` + diff --git a/docs/sources/configuration/examples/10-S3-And-DynamoDB-With-KMS-Snippet.yaml b/docs/sources/configuration/examples/10-S3-And-DynamoDB-With-KMS-Snippet.yaml new file mode 100644 index 000000000000..39b74ff6153b --- /dev/null +++ b/docs/sources/configuration/examples/10-S3-And-DynamoDB-With-KMS-Snippet.yaml @@ -0,0 +1,20 @@ +# This partial configuration uses S3 for chunk storage and uses DynamoDB for index storage and a KMS CMK for encryption + +schema_config: + configs: + - from: 2020-05-15 + store: aws + object_store: s3 + schema: v11 + index: + prefix: loki_ +storage_config: + aws: + s3: s3://access_key:secret_access_key@region/bucket_name + sse_encryption: true + sse: + type: SSE-KMS + kms_key_id: 1234abcd-12ab-34cd-56ef-1234567890ab + dynamodb: + dynamodb_url: dynamodb://access_key:secret_access_key@region + kms_key_id: 0987dcba-09fe-87dc-65ba-ab0987654321 \ No newline at end of file diff --git a/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml b/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml new file mode 100644 index 000000000000..56ea9cd9c1e6 --- /dev/null +++ b/docs/sources/configuration/examples/alibaba-cloud-storage-config.yaml @@ -0,0 +1,17 @@ +# This partial configuration uses Alibaba for chunk storage + +schema_config: + configs: + - from: 2020-05-15 + object_store: alibabacloud + schema: v11 + index: + prefix: loki_index_ + period: 168h + +storage_config: + alibabacloud: + bucket: + endpoint: + access_key_id: + secret_access_key: diff --git a/docs/sources/installation/helm/install-scalable/index.md b/docs/sources/installation/helm/install-scalable/index.md index 210d775ab05a..9ba75f987991 100644 --- a/docs/sources/installation/helm/install-scalable/index.md +++ b/docs/sources/installation/helm/install-scalable/index.md @@ -14,7 +14,7 @@ keywords: [] This Helm Chart installation runs the Grafana Loki cluster within a Kubernetes cluster. -If object storge is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode]({{}}), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs. +If object storge is configured, this chart configures Loki to run `read` and `write` targets in a [scalable mode]({{}}), highly available architecture (3 replicas of each) designed to work with AWS S3 object storage. It will also configure meta-monitoring of metrics and logs. It is not possible to run the scalable mode with the `filesystem` storage. @@ -23,6 +23,7 @@ It is not possible to run the scalable mode with the `filesystem` storage. - Helm 3 or above. See [Installing Helm](https://helm.sh/docs/intro/install/). - A running Kubernetes cluster. - A Prometheus operator installation in case meta-monitoring should be used. 
+- Optionally a Memcached deployment for better performance. Consult the [caching section]({{}}) on how to configure Memcached. **To deploy Loki in scalable mode:** @@ -40,30 +41,37 @@ It is not possible to run the scalable mode with the `filesystem` storage. 1. Configure the object storage: - - Create the configuration file `values.yaml`: + - Create the configuration file `values.yaml`. The example below illustrates a s3 configuration: ```yaml - storage: - bucketNames: - chunks: chunks - ruler: ruler - admin: admin - type: s3 - s3: - endpoint: - region: - secretAccessKey: - accessKeyId: - s3ForcePathStyle: false - insecure: false + loki: + storage: + bucketNames: + chunks: chunks + ruler: ruler + admin: admin + type: s3 + s3: + endpoint: + region: + secretAccessKey: + accessKeyId: + s3ForcePathStyle: false + insecure: false ``` Consult the [Reference]({{}}) for configuring other storage providers. - - Define the AWS S3 credentials in the file. + - If you're just trying things, you can use the following configuration instead, that sets MinIO as storage: + ```yaml + minio: + enabled: true + ``` -1. Upgrade the Loki deployment with this command. +1. Install or upgrade the Loki deployment with this command. ```bash + helm install --values values.yaml loki grafana/loki + # or upgrade for existing installations helm upgrade --values values.yaml loki grafana/loki ``` diff --git a/docs/sources/installation/helm/monitor-and-alert/_index.md b/docs/sources/installation/helm/monitor-and-alert/_index.md new file mode 100644 index 000000000000..c5058aaab7bc --- /dev/null +++ b/docs/sources/installation/helm/monitor-and-alert/_index.md @@ -0,0 +1,19 @@ +--- +title: Monitoring +description: monitoring +weight: 200 +aliases: + - /docs/installation/helm/monitor-and-alert +keywords: + - helm + - scalable + - simple-scalable + - monitor +--- + +# Monitoring + +There are two common ways to monitor Loki: + +- [Monitor using Grafana Cloud (recommended)]({{}}) +- [Monitor using using Local Monitoring]({{}}) diff --git a/docs/sources/installation/helm/monitor-and-alert/with-grafana-cloud.md b/docs/sources/installation/helm/monitor-and-alert/with-grafana-cloud.md new file mode 100644 index 000000000000..1ed3caea15f0 --- /dev/null +++ b/docs/sources/installation/helm/monitor-and-alert/with-grafana-cloud.md @@ -0,0 +1,100 @@ +--- +title: Configure Monitoring and Alerting of Loki Using Grafana Cloud +menuTitle: Monitor Loki with Grafana Cloud +description: setup monitoring and alerts for Loki using Grafana Cloud +aliases: + - /docs/installation/helm/monitoring/with-grafana-cloud +weight: 100 +keywords: + - monitoring + - alert + - alerting + - grafana cloud +--- + +# Configure Monitoring and Alerting of Loki Using Grafana Cloud + +This topic will walk you through using Grafana Cloud to monitor a Loki installation that is installed with the Helm chart. This approach leverages many of the chart's _self monitoring_ features, but instead of sending logs back to Loki itself, it sends them to a Grafana Cloud Logs instance. This approach also does not require the installation of the Prometheus Operator and instead sends metrics to a Grafana Cloud Metrics instance. Using Grafana Cloud to monitor Loki has the added benefit of being able to troubleshoot problems with Loki when the Helm installed Loki is down, as the logs will still be available in the Grafana Cloud Logs instance. + +**Before you begin:** + +- Helm 3 or above. See [Installing Helm](https://helm.sh/docs/intro/install/). 
+- A Grafana Cloud account and stack (including Cloud Grafana, Cloud Metrics, and Cloud Logs) +- [Grafana Kubernetes Monitoring using Agent](/docs/grafana-cloud/kubernetes-monitoring/configuration/config-k8s-agent-guide/) configured for the Kubernetes cluster +- A running Loki deployment installed in that Kubernetes cluster via the Helm chart + +**Prequisites for Monitoring Loki:** + +You must setup the Grafana Kubernetes Integration following the instructions in [Grafana Kubernetes Monitoring using Agent](/docs/grafana-cloud/kubernetes-monitoring/configuration/config-k8s-agent-guide/) as this will install necessary components for collecting metrics about your Kubernetes cluster and sending them to Grafana Cloud. Many of the dashboards installed as a part of the Loki integration rely on these metrics. + +Walking through this installation will create two Grafana Agent configurations, one for metrics and one for logs, that will add the external label `cluster: cloud`. In order for the Dashboards in the self-hosted Grafana Loki integration to work, the cluster name needs to match your Helm installation name. If you installed Loki using the command `helm install best-loki-cluster grafana/loki`, you would need to change the `cluster` value in both Grafana Agent configurations from `cloud` to `best-loki-cluster` when setting up the Grafana Kubernetes integration. + +**To set up the Loki integration in Grafana Cloud:** + +1. Get valid Push credentials for your Cloud Metrics and Cloud Logs instances. +1. Create a secret in the same namespace as Loki to store your Cloud Logs credentials. + + ```bash + cat <<'EOF' | NAMESPACE=loki /bin/sh -c 'kubectl apply -n $NAMESPACE -f -' + apiVersion: v1 + data: + password: + username: + kind: Secret + metadata: + name: grafana-cloud-logs-credentials + type: Opaque + EOF + ``` + +1. Create a secret to store your Cloud Metrics credentials. + + ```bash + cat <<'EOF' | NAMESPACE=loki /bin/sh -c 'kubectl apply -n $NAMESPACE -f -' + apiVersion: v1 + data: + password: + username: + kind: Secret + metadata: + name: grafana-cloud-metrics-credentials + type: Opaque + EOF + ``` + +1. Enable monitoring metrics and logs for the Loki installation to be sent your cloud database instances by adding the following to your Helm `values.yaml` file: + + ```yaml + --- + monitoring: + dashboards: + enabled: false + rules: + enabled: false + selfMonitoring: + logsInstance: + clients: + - url: + basicAuth: + username: + name: grafana-cloud-logs-credentials + key: username + password: + name: grafana-cloud-logs-credentials + key: password + serviceMonitor: + metricsInstance: + remoteWrite: + - url: + basicAuth: + username: + name: grafana-cloud-metrics-credentials + key: username + password: + name: grafana-cloud-metrics-credentials + key: password + ``` + +1. Install the self-hosted Grafana Loki integration by going to your hosted Grafana instance, clicking the lightning bolt icon labeled **Integrations and Connections**, then search for and install the **Self-hosted Grafana Loki** integration. + +1. Once the self-hosted Grafana Loki integration is installed, click the **View Dashboards** button to see the installed dashboards. 
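+
+As a side note on the credential secrets created in the steps above (a sketch, not part of the integration's own instructions): if you prefer not to base64-encode the values by hand, the same secret can be written with `stringData`, and Kubernetes will encode it for you:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  # must match the name referenced from the Helm values, for example grafana-cloud-logs-credentials
+  name: grafana-cloud-logs-credentials
+type: Opaque
+stringData:
+  username: "<your Cloud Logs username>"
+  password: "<your Cloud Logs API key>"
+```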
diff --git a/docs/sources/installation/helm/monitor-and-alert/index.md b/docs/sources/installation/helm/monitor-and-alert/with-local-monitoring.md similarity index 96% rename from docs/sources/installation/helm/monitor-and-alert/index.md rename to docs/sources/installation/helm/monitor-and-alert/with-local-monitoring.md index ee6f3150863d..20a02ef40f87 100644 --- a/docs/sources/installation/helm/monitor-and-alert/index.md +++ b/docs/sources/installation/helm/monitor-and-alert/with-local-monitoring.md @@ -3,7 +3,7 @@ title: Configure monitoring and alerting menuTitle: Configure monitoring and alerting description: setup monitoring and alerts for the Helm Chart aliases: - - /docs/installation/helm/monitoring + - /docs/installation/helm/monitoring/with-local-monitoring weight: 100 keywords: - monitoring @@ -13,7 +13,7 @@ keywords: # Configure monitoring and alerting -By default this Helm Chart configures meta-monitoring of metrics (service monitoring) and logs (self monitoring). +By default this Helm Chart configures meta-monitoring of metrics (service monitoring) and logs (self monitoring). This topic will walk you through configuring monitoring using a monitoring solution local to the same cluster where Loki is installed. The `ServiceMonitor` resource works with either the Prometheus Operator or the Grafana Agent Operator, and defines how Loki's metrics should be scraped. Scraping this Loki cluster using the scrape config defined in the `SerivceMonitor` resource is required for the included dashboards to work. A `MetricsInstance` can be configured to write the metrics to a remote Prometheus instance such as Grafana Cloud Metrics. diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md index b99f7d4661e3..0ea5c410048e 100644 --- a/docs/sources/installation/helm/reference.md +++ b/docs/sources/installation/helm/reference.md @@ -173,6 +173,15 @@ null
 {}
 
+ + + + backend.podLabels + object + Additional labels for each `backend` pod +
+{}
+
@@ -794,9 +803,9 @@ null gateway.basicAuth.htpasswd string - Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function. The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load. + Uses the specified users from the `loki.tenants` list to create the htpasswd file if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load.
-"{{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }}"
+"{{ if .Values.loki.tenants }}\n  {{- range $t := .Values.loki.tenants }}\n{{ htpasswd (required \"All tenants must have a 'name' set\" $t.name) (required \"All tenants must have a 'password' set\" $t.password) }}\n  {{- end }}\n{{ else }} {{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }} {{ end }}"
 
@@ -1004,6 +1013,33 @@ false
 {}
 
+ + + + gateway.nginxConfig.customBackendUrl + string + Override Backend URL +
+null
+
+ + + + gateway.nginxConfig.customReadUrl + string + Override Read URL +
+null
+
+ + + + gateway.nginxConfig.customWriteUrl + string + Override Write URL +
+null
+
@@ -1018,9 +1054,9 @@ See values.yaml gateway.nginxConfig.httpSnippet string - Allows appending custom configuration to the http block + Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating
-""
+"{{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}"
 
@@ -1557,6 +1593,7 @@ true Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration
 {
+  "compactor_address": "{{ include \"loki.compactorAddress\" . }}",
   "path_prefix": "/var/loki",
   "replication_factor": 3
 }
@@ -1648,7 +1685,7 @@ true
 			string
 			Overrides the image tag whose default is the chart's appVersion TODO: needed for 3rd target backend functionality revert to null or latest once this behavior is relased
 			
-"main-5e53303"
+null
 
@@ -1844,7 +1881,6 @@ true "accountKey": null, "accountName": null, "requestTimeout": null, - "useFederatedToken": false, "useManagedIdentity": false, "userAssignedId": null }, @@ -1899,6 +1935,24 @@ true
 {}
 
+ + + + loki.tenants + list + Tenants list to be created on nginx htpasswd file, with name and password keys +
+[]
+
+ + + + memberlist.service.publishNotReadyAddresses + bool + +
+false
+
@@ -2350,9 +2404,9 @@ true monitoring.serviceMonitor.interval string - ServiceMonitor scrape interval + ServiceMonitor scrape interval Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at least 1/4 rate interval.
-null
+"15s"
 
@@ -2744,7 +2798,7 @@ null bool Whether or not to use the 2 target type simple scalable mode (read, write) or the 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will run two targets, false will run 3 targets.
-false
+true
 
diff --git a/docs/sources/installation/local.md b/docs/sources/installation/local.md index 1004898370ee..e06e5d428643 100644 --- a/docs/sources/installation/local.md +++ b/docs/sources/installation/local.md @@ -55,9 +55,9 @@ The community provides packages of Loki for openSUSE Linux. To install: 1. Add the repository `https://download.opensuse.org/repositories/security:/logging/` to your system. For example, if you are using Leap 15.1, run `sudo zypper ar https://download.opensuse.org/repositories/security:/logging/openSUSE_Leap_15.1/security:logging.repo ; sudo zypper ref` -1. Install the Loki package with `zypper in loki` -1. Enable the Loki and Promtail services: - - `systemd start loki && systemd enable loki` - - `systemd start promtail && systemd enable promtail` -1. Modify the configuration files as needed: `/etc/loki/promtail.yaml` and +2. Install the Loki package with `zypper in loki` +3. Enable the Loki and Promtail services: + - `systemd start loki && systemd enable loki` + - `systemd start promtail && systemd enable promtail` +4. Modify the configuration files as needed: `/etc/loki/promtail.yaml` and `/etc/loki/loki.yaml`. diff --git a/docs/sources/installation/sizing/index.md b/docs/sources/installation/sizing/index.md index d618eb6d6b43..ee38c0995bff 100644 --- a/docs/sources/installation/sizing/index.md +++ b/docs/sources/installation/sizing/index.md @@ -66,7 +66,7 @@ This tool helps to generate a Helm Charts `values.yaml` file based on specified
- Defines the log volume in terrabytes expected to be ingested each day. + Defines the log volume in gigabytes, ie 1e+9 bytes, expected to be ingested each day. Defines the node type of the Kubernetes cluster. diff --git a/docs/sources/logql/analyzer/script.js b/docs/sources/logql/analyzer/script.js index eabfda4e2116..0c043ce72158 100644 --- a/docs/sources/logql/analyzer/script.js +++ b/docs/sources/logql/analyzer/script.js @@ -83,7 +83,8 @@ async function handleResponse(response) { } function handleError(error) { - document.getElementById("query-error").innerHTML = error + const template = Handlebars.compile("{{error_text}}"); + document.getElementById("query-error").innerHTML = template({error_text:error}) document.getElementById("query-error").classList.remove("hide"); resultsElement.classList.add("hide"); } diff --git a/docs/sources/logql/log_queries/_index.md b/docs/sources/logql/log_queries/_index.md index e41c6141e392..f7034b1fa814 100644 --- a/docs/sources/logql/log_queries/_index.md +++ b/docs/sources/logql/log_queries/_index.md @@ -403,25 +403,44 @@ The **json** parser operates in two modes: #### logfmt -The **logfmt** parser can be added using the `| logfmt` and will extract all keys and values from the [logfmt](https://brandur.org/logfmt) formatted log line. +The **logfmt** parser can operate in two modes: -For example the following log line: +1. **without** parameters: -```logfmt -at=info method=GET path=/ host=grafana.net fwd="124.133.124.161" service=8ms status=200 -``` + The **logfmt** parser can be added using `| logfmt` and will extract all keys and values from the [logfmt](https://brandur.org/logfmt) formatted log line. -will get those labels extracted: + For example the following log line: -```kv -"at" => "info" -"method" => "GET" -"path" => "/" -"host" => "grafana.net" -"fwd" => "124.133.124.161" -"service" => "8ms" -"status" => "200" -``` + ```logfmt + at=info method=GET path=/ host=grafana.net fwd="124.133.124.161" service=8ms status=200 + ``` + + will result in having the following labels extracted: + + ```kv + "at" => "info" + "method" => "GET" + "path" => "/" + "host" => "grafana.net" + "fwd" => "124.133.124.161" + "service" => "8ms" + "status" => "200" + ``` + +2. **with** parameters: + + Similar to [JSON](#json), using `| logfmt label="expression", another="expression"` in the pipeline will result in extracting only the fields specified by the labels. + + For example, `| logfmt host, fwd_ip="fwd"` will extract the labels `host` and `fwd` from the following log line: + ```logfmt + at=info method=GET path=/ host=grafana.net fwd="124.133.124.161" service=8ms status=200 + ``` + + And rename `fwd` to `fwd_ip`: + ```kv + "host" => "grafana.net" + "fwd_ip" => "124.133.124.161" + ``` #### Pattern diff --git a/docs/sources/operations/caching.md b/docs/sources/operations/caching.md new file mode 100644 index 000000000000..d13a58aaeee2 --- /dev/null +++ b/docs/sources/operations/caching.md @@ -0,0 +1,94 @@ +--- +title: Caching +menuTitle: Caching +description: Enable and configure memcached for caching. +weight: 100 +keywords: + - memcached + - caching +--- + +# Caching + +Loki supports caching of index writes and lookups, chunks and query results to +speed up query performance. This sections describes the recommended Memcached +configuration to enable caching for chunks and query results. The index lookup +cache is configured to be in-memory by default. + +## Before you begin + +- It is recommended to deploy four dedicated Memcached clusters. 
+- As of 2023-02-01, the `memcached:1.6.17-alpine` version of the image is recommended. +- Consult the Loki ksonnet [memcached](https://github.com/grafana/loki/blob/main/production/ksonnet/loki/memcached.libsonnet) deployment and the ksonnet [memcached library](https://github.com/grafana/jsonnet-libs/tree/master/memcached). + +## Steps + +To enable and configure Memcached: + +1. Deploy each Memcached service with at least three replicas and configure + each as follows: + 1. Chunk cache + ``` + --memory-limit=4096 --max-item-size=2m --conn-limit=1024 + ``` + 1. Query result cache + ``` + --memory-limit=1024 --max-item-size=5m --conn-limit=1024 + ``` + +1. Configure Loki to use the cache. + 1. If the Helm chart is used + + Set `memcached.chunk_cache.host` to the Memcached address for the chunk cache, `memcached.results_cache.host` to the Memcached address for the query result cache, `memcached.chunk_cache.enabled=true` and `memcached.results_cache.enabled=true`. + + Ensure that the connection limit of Memcached is at least `number_of_clients * max_idle_conns`. + + The options `host` and `service` depend on the type of installation. For example, using the `bitnami/memcached` Helm chart with the following commands, the `service` values are always `memcached`. + ``` + helm upgrade --install chunk-cache -n loki bitnami/memcached -f memcached-overrides.yaml + helm upgrade --install write-cache -n loki bitnami/memcached -f memcached-overrides.yaml + helm upgrade --install results-cache -n loki bitnami/memcached -f memcached-overrides.yaml + helm upgrade --install index-cache -n loki bitnami/memcached -f memcached-overrides.yaml + ``` + In this case, the Loki configuration would be + ```yaml + loki: + memcached: + chunk_cache: + enabled: true + host: chunk-cache-memcached.loki.svc + service: memcache + batch_size: 256 + parallelism: 10 + results_cache: + enabled: true + host: results-cache-memcached.loki.svc + service: memcache + default_validity: 12h + ``` + 1. If the Loki configuration is used + 1. Configure the chunk cache + ```yaml + chunk_store_config: + chunk_cache_config: + memcached: + batch_size: 256 + parallelism: 10 + memcached_client: + host: + service: + ``` + 1. Configure the query result cache + ```yaml + query_range: + cache_results: true + results_cache: + cache: + memcached_client: + consistent_hash: true + host: + service: + max_idle_conns: 16 + timeout: 500ms + update_interval: 1m + ``` diff --git a/docs/sources/operations/scalability.md b/docs/sources/operations/scalability.md index 974a1a60bd6a..f426a3db2195 100644 --- a/docs/sources/operations/scalability.md +++ b/docs/sources/operations/scalability.md @@ -5,9 +5,6 @@ weight: 30 --- # Scalability -See [Loki: Prometheus-inspired, open source logging for cloud natives](/blog/2018/12/12/loki-prometheus-inspired-open-source-logging-for-cloud-natives/) -for a discussion about Grafana Loki's scalability. - When scaling Loki, operators should consider running several Loki processes partitioned by role (ingester, distributor, querier) rather than a single Loki process. Grafana Labs' [production setup](https://github.com/grafana/loki/blob/master/production/ksonnet/loki) @@ -19,7 +16,7 @@ and scaling for resource usage. The Query frontend has an in-memory queue that can be moved out into a separate process similar to the [Grafana Mimir query-scheduler](/docs/mimir/latest/operators-guide/architecture/components/query-scheduler/). This allows running multiple query frontends.
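+For example, a minimal configuration-file sketch of this setup, assuming a query-scheduler reachable at the placeholder address `query-scheduler.loki.svc:9095`, sets the same scheduler address on the query frontend and on the queriers (the equivalent command-line flags are described below):
+
+```yaml
+# Query frontend: enqueue requests on the external query-scheduler
+# instead of the in-memory queue.
+frontend:
+  scheduler_address: query-scheduler.loki.svc:9095 # placeholder, use your scheduler's host:grpc-port
+
+# Queriers: pull work from the same query-scheduler.
+frontend_worker:
+  scheduler_address: query-scheduler.loki.svc:9095
+```
+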
-To run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes needs to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file]({{< relref "../configuration" >}}). +To run with the Query Scheduler, the frontend needs to be passed the scheduler's address via `-frontend.scheduler-address` and the querier processes needs to be started with `-querier.scheduler-address` set to the same address. Both options can also be defined via the [configuration file]({{< relref "../configuration/_index.md" >}}). It is not valid to start the querier with both a configured frontend and a scheduler address. diff --git a/docs/sources/tools/logcli.md b/docs/sources/tools/logcli.md index 7eee81ed3968..fde3f41ef6b2 100644 --- a/docs/sources/tools/logcli.md +++ b/docs/sources/tools/logcli.md @@ -250,26 +250,21 @@ usage: logcli query [] Run a LogQL query. -The "query" command is useful for querying for logs. Logs can be returned in a -few output modes: +The "query" command is useful for querying for logs. Logs can be returned in a few output modes: raw: log line default: log timestamp + log labels + log line jsonl: JSON response from Loki API of log line -The output of the log can be specified with the "-o" flag, for example, "-o raw" -for the raw output format. +The output of the log can be specified with the "-o" flag, for example, "-o raw" for the raw output format. -The "query" command will output extra information about the query and its -results, such as the API URL, set of common labels, and set of excluded labels. -This extra information can be suppressed with the --quiet flag. +The "query" command will output extra information about the query and its results, such as the API URL, set of common labels, and set of excluded labels. This extra information can be +suppressed with the --quiet flag. -By default we look over the last hour of data; use --since to modify or provide -specific start and end times with --from and --to respectively. +By default we look over the last hour of data; use --since to modify or provide specific start and end times with --from and --to respectively. -Notice that when using --from and --to then ensure to use RFC3339Nano time -format, but without timezone at the end. The local timezone will be added -automatically or if using --timezone flag. +Notice that when using --from and --to then ensure to use RFC3339Nano time format, but without timezone at the end. The local timezone will be added automatically or if using --timezone +flag. Example: @@ -282,101 +277,119 @@ Example: The output is limited to 30 entries by default; use --limit to increase. -While "query" does support metrics queries, its output contains multiple data -points between the start and end query time. This output is used to build -graphs, similar to what is seen in the Grafana Explore graph view. If you are -querying metrics and just want the most recent data point (like what is seen in -the Grafana Explore table view), then you should use the "instant-query" command -instead. +While "query" does support metrics queries, its output contains multiple data points between the start and end query time. This output is used to build graphs, similar to what is seen +in the Grafana Explore graph view. 
If you are querying metrics and just want the most recent data point (like what is seen in the Grafana Explore table view), then you should use the +"instant-query" command instead. + +Parallelization: + +You can download an unlimited number of logs in parallel, there are a few flags which control this behaviour: + + --parallel-duration + --parallel-max-workers + --part-path-prefix + --overwrite-completed-parts + --merge-parts + --keep-parts + +Refer to the help for each flag for details about what each of them do. + +Example: + + logcli query + --timezone=UTC + --from="2021-01-19T10:00:00Z" + --to="2021-01-19T20:00:00Z" + --output=jsonl + --parallel-duration="15m" + --parallel-max-workers="4" + --part-path-prefix="/tmp/my_query" + --merge-parts + 'my-query' + +This example will create a queue of jobs to execute, each being 15 minutes in duration. In this case, that means, for the 10-hour total duration, there will be forty 15-minute jobs. +The --limit flag is ignored. + +It will start four workers, and they will each take a job to work on from the queue until all the jobs have been completed. + +Each job will save a "part" file to the location specified by the --part-path-prefix. Different prefixes can be used to run multiple queries at the same time. The timestamp of the start and +end of the part is in the file name. While the part is being downloaded, the filename will end in ".part", when it is complete, the file will be renamed to remove this ".part" extension. +By default, if a completed part file is found, that part will not be downloaded again. This can be overridden with the --overwrite-completed-parts flag. + +Part file example using the previous command, adding --keep-parts so they are not deleted: + +Since we don't have the --forward flag, the parts will be downloaded in reverse. Two of the workers have finished their jobs (last two files), and have picked up the next jobs in the queue. +Running ls, this is what we should expect to see. + +$ ls -1 /tmp/my_query* /tmp/my_query_20210119T183000_20210119T184500.part.tmp /tmp/my_query_20210119T184500_20210119T190000.part.tmp /tmp/my_query_20210119T190000_20210119T191500.part.tmp +/tmp/my_query_20210119T191500_20210119T193000.part.tmp /tmp/my_query_20210119T193000_20210119T194500.part /tmp/my_query_20210119T194500_20210119T200000.part + +If you do not specify the --merge-parts flag, the part files will be downloaded, and logcli will exit, and you can process the files as you wish. With the flag specified, the part files +will be read in order, and the output printed to the terminal. The lines will be printed as soon as the next part is complete, you don't have to wait for all the parts to download before +getting output. The --merge-parts flag will remove the part files when it is done reading each of them. To change this, you can use the --keep-parts flag, and the part files will not be +removed. Flags: - --help Show context-sensitive help (also try --help-long - and --help-man). - --version Show application version. - -q, --quiet Suppress query metadata - --stats Show query statistics - -o, --output=default Specify output mode [default, raw, jsonl]. raw - suppresses log labels and timestamp. - -z, --timezone=Local Specify the timezone to use when formatting output - timestamps [Local, UTC] - --cpuprofile="" Specify the location for writing a CPU profile. - --memprofile="" Specify the location for writing a memory profile. - --stdin Take input logs from stdin - --addr="http://localhost:3100" - Server address. 
Can also be set using LOKI_ADDR - env var. - --username="" Username for HTTP basic auth. Can also be set - using LOKI_USERNAME env var. - --password="" Password for HTTP basic auth. Can also be set - using LOKI_PASSWORD env var. - --ca-cert="" Path to the server Certificate Authority. Can also - be set using LOKI_CA_CERT_PATH env var. - --tls-skip-verify Server certificate TLS skip verify. Can also be - set using LOKI_TLS_SKIP_VERIFY env var. - --cert="" Path to the client certificate. Can also be set - using LOKI_CLIENT_CERT_PATH env var. - --key="" Path to the client certificate key. Can also be - set using LOKI_CLIENT_KEY_PATH env var. - --org-id="" adds X-Scope-OrgID to API requests for - representing tenant ID. Useful for requesting - tenant data when bypassing an auth gateway. Can - also be set using LOKI_ORG_ID env var. - --query-tags="" adds X-Query-Tags http header to API requests. - This header value will be part of `metrics.go` - statistics. Useful for tracking the query. Can - also be set using LOKI_QUERY_TAGS env var. - --bearer-token="" adds the Authorization header to API requests for - authentication purposes. Can also be set using - LOKI_BEARER_TOKEN env var. - --bearer-token-file="" adds the Authorization header to API requests for - authentication purposes. Can also be set using - LOKI_BEARER_TOKEN_FILE env var. - --retries=0 How many times to retry each query when getting an - error response from Loki. Can also be set using - LOKI_CLIENT_RETRIES env var. - --min-backoff=0 Minimum backoff time between retries. Can also be - set using LOKI_CLIENT_MIN_BACKOFF env var. - --max-backoff=0 Maximum backoff time between retries. Can also be - set using LOKI_CLIENT_MAX_BACKOFF env var. - --auth-header="Authorization" - The authorization header used. Can also be set - using LOKI_AUTH_HEADER env var. - --proxy-url="" The http or https proxy to use when making - requests. Can also be set using - LOKI_HTTP_PROXY_URL env var. - --limit=30 Limit on number of entries to print. - --since=1h Lookback window. - --from=FROM Start looking for logs at this absolute time - (inclusive) - --to=TO Stop looking for logs at this absolute time - (exclusive) - --step=STEP Query resolution step width, for metric queries. - Evaluate the query at the specified step over the - time range. - --interval=INTERVAL Query interval, for log queries. Return entries at - the specified interval, ignoring those between. - **This parameter is experimental, please see Issue - 1779** - --batch=1000 Query batch size to use until 'limit' is reached - --forward Scan forwards through logs. - --no-labels Do not print any labels - --exclude-label=EXCLUDE-LABEL ... - Exclude labels given the provided key during - output. - --include-label=INCLUDE-LABEL ... - Include labels given the provided key during - output. - --labels-length=0 Set a fixed padding to labels - --store-config="" Execute the current query using a configured - storage from a given Loki configuration file. - --remote-schema Execute the current query using a remote schema - retrieved using the configured storage in the - given Loki configuration file. - --colored-output Show output with colored labels - -t, --tail Tail the logs - -f, --follow Alias for --tail - --delay-for=0 Delay in tailing by number of seconds to - accumulate logs for re-ordering + --help Show context-sensitive help (also try --help-long and --help-man). + --version Show application version. 
+ -q, --quiet Suppress query metadata + --stats Show query statistics + -o, --output=default Specify output mode [default, raw, jsonl]. raw suppresses log labels and timestamp. + -z, --timezone=Local Specify the timezone to use when formatting output timestamps [Local, UTC] + --cpuprofile="" Specify the location for writing a CPU profile. + --memprofile="" Specify the location for writing a memory profile. + --stdin Take input logs from stdin + --addr="http://localhost:3100" + Server address. Can also be set using LOKI_ADDR env var. + --username="" Username for HTTP basic auth. Can also be set using LOKI_USERNAME env var. + --password="" Password for HTTP basic auth. Can also be set using LOKI_PASSWORD env var. + --ca-cert="" Path to the server Certificate Authority. Can also be set using LOKI_CA_CERT_PATH env var. + --tls-skip-verify Server certificate TLS skip verify. Can also be set using LOKI_TLS_SKIP_VERIFY env var. + --cert="" Path to the client certificate. Can also be set using LOKI_CLIENT_CERT_PATH env var. + --key="" Path to the client certificate key. Can also be set using LOKI_CLIENT_KEY_PATH env var. + --org-id="" adds X-Scope-OrgID to API requests for representing tenant ID. Useful for requesting tenant data when bypassing an auth gateway. Can also be set using + LOKI_ORG_ID env var. + --query-tags="" adds X-Query-Tags http header to API requests. This header value will be part of `metrics.go` statistics. Useful for tracking the query. Can also be set + using LOKI_QUERY_TAGS env var. + --bearer-token="" adds the Authorization header to API requests for authentication purposes. Can also be set using LOKI_BEARER_TOKEN env var. + --bearer-token-file="" adds the Authorization header to API requests for authentication purposes. Can also be set using LOKI_BEARER_TOKEN_FILE env var. + --retries=0 How many times to retry each query when getting an error response from Loki. Can also be set using LOKI_CLIENT_RETRIES env var. + --min-backoff=0 Minimum backoff time between retries. Can also be set using LOKI_CLIENT_MIN_BACKOFF env var. + --max-backoff=0 Maximum backoff time between retries. Can also be set using LOKI_CLIENT_MAX_BACKOFF env var. + --auth-header="Authorization" + The authorization header used. Can also be set using LOKI_AUTH_HEADER env var. + --proxy-url="" The http or https proxy to use when making requests. Can also be set using LOKI_HTTP_PROXY_URL env var. + --limit=30 Limit on number of entries to print. Setting it to 0 will fetch all entries. + --since=1h Lookback window. + --from=FROM Start looking for logs at this absolute time (inclusive) + --to=TO Stop looking for logs at this absolute time (exclusive) + --step=STEP Query resolution step width, for metric queries. Evaluate the query at the specified step over the time range. + --interval=INTERVAL Query interval, for log queries. Return entries at the specified interval, ignoring those between. **This parameter is experimental, please see Issue 1779** + --batch=1000 Query batch size to use until 'limit' is reached + --parallel-duration=1h Split the range into jobs of this length to download the logs in parallel. This will result in the logs being out of order. Use --part-path-prefix to create + a file per job to maintain ordering. + --parallel-max-workers=1 Max number of workers to start up for parallel jobs. A value of 1 will not create any parallel workers. When using parallel workers, limit is ignored. 
+ --part-path-prefix=PART-PATH-PREFIX + When set, each server response will be saved to a file with this prefix. Creates files in the format: 'prefix-utc_start-utc_end.part'. Intended to be used + with the parallel-* flags so that you can combine the files to maintain ordering based on the filename. Default is to write to stdout. + --overwrite-completed-parts + Overwrites completed part files. This will download the range again, and replace the original completed part file. Default will skip a range if it's part + file is already downloaded. + --merge-parts Reads the part files in order and writes the output to stdout. Original part files will be deleted with this option. + --keep-parts Overrides the default behaviour of --merge-parts which will delete the part files once all the files have been read. This option will keep the part files. + --forward Scan forwards through logs. + --no-labels Do not print any labels + --exclude-label=EXCLUDE-LABEL ... + Exclude labels given the provided key during output. + --include-label=INCLUDE-LABEL ... + Include labels given the provided key during output. + --labels-length=0 Set a fixed padding to labels + --store-config="" Execute the current query using a configured storage from a given Loki configuration file. + --remote-schema Execute the current query using a remote schema retrieved using the configured storage in the given Loki configuration file. + --colored-output Show output with colored labels + -t, --tail Tail the logs + -f, --follow Alias for --tail + --delay-for=0 Delay in tailing by number of seconds to accumulate logs for re-ordering Args: eg '{foo="bar",baz=~".*blip"} |~ ".*error.*"' diff --git a/go.mod b/go.mod index 4eed6b2db50f..b1cc487b4937 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,14 @@ require ( cloud.google.com/go/storage v1.29.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/Azure/go-autorest/autorest/adal v0.9.21 + github.com/Azure/go-autorest/autorest/adal v0.9.22 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 github.com/Masterminds/sprig/v3 v3.2.3 github.com/NYTimes/gziphandler v1.1.1 github.com/Shopify/sarama v1.38.1 github.com/Workiva/go-datastructures v1.0.53 github.com/alicebob/miniredis/v2 v2.30.0 + github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible github.com/aws/aws-sdk-go v1.44.187 github.com/baidubce/bce-sdk-go v0.9.141 github.com/bmatcuk/doublestar v1.3.4 @@ -25,7 +26,7 @@ require ( github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/cristalhq/hedgedhttp v0.7.2 github.com/davecgh/go-spew v1.1.1 - github.com/docker/docker v20.10.21+incompatible + github.com/docker/docker v20.10.23+incompatible github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 github.com/drone/envsubst v1.0.3 github.com/dustin/go-humanize v1.0.0 @@ -66,7 +67,7 @@ require ( github.com/klauspost/compress v1.15.15 github.com/klauspost/pgzip v1.2.5 github.com/mattn/go-ieproxy v0.0.1 - github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb + github.com/minio/minio-go/v7 v7.0.45 github.com/mitchellh/go-wordwrap v1.0.1 github.com/mitchellh/mapstructure v1.5.0 github.com/modern-go/reflect2 v1.0.2 @@ -77,13 +78,14 @@ require ( github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing/opentracing-go v1.2.0 + github.com/oschwald/geoip2-golang v1.8.0 // github.com/pierrec/lz4 v2.0.5+incompatible github.com/pierrec/lz4/v4 v4.1.17 
github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.39.0 - github.com/prometheus/prometheus v0.41.0 + github.com/prometheus/prometheus v0.42.0 github.com/segmentio/fasthash v1.0.3 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 @@ -98,16 +100,15 @@ require ( go.uber.org/atomic v1.10.0 go.uber.org/goleak v1.2.0 golang.org/x/crypto v0.5.0 - golang.org/x/net v0.5.0 + golang.org/x/net v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.4.0 + golang.org/x/sys v0.5.0 golang.org/x/time v0.3.0 google.golang.org/api v0.109.0 google.golang.org/grpc v1.52.3 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 k8s.io/klog v1.0.0 ) @@ -118,11 +119,12 @@ require ( github.com/heroku/x v0.0.55 github.com/prometheus/alertmanager v0.25.0 github.com/prometheus/common/sigv4 v0.1.0 - github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e + github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 github.com/willf/bloom v2.0.3+incompatible - golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 + go4.org/netipx v0.0.0-20230125063823-8449b0a6169f + golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 golang.org/x/oauth2 v0.4.0 - golang.org/x/text v0.6.0 + golang.org/x/text v0.7.0 ) require ( @@ -132,6 +134,10 @@ require ( cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/longrunning v0.3.0 // indirect github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect @@ -140,6 +146,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect @@ -168,7 +175,7 @@ require ( github.com/coreos/go-systemd/v22 v22.4.0 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/digitalocean/godo v1.91.1 // indirect + github.com/digitalocean/godo v1.95.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -178,8 +185,7 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/efficientgo/e2e v0.12.2-0.20220714084440-2f5240d8c363 // indirect - github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b // indirect + github.com/efficientgo/core v1.0.0-rc.2 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect 
github.com/envoyproxy/go-control-plane v0.10.3 // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect @@ -197,15 +203,15 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.0 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect - github.com/gofrs/flock v0.7.1 // indirect + github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 // indirect + github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/gophercloud/gophercloud v1.1.1 // indirect @@ -231,7 +237,8 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.0.14 // indirect + github.com/klauspost/cpuid/v2 v2.1.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -249,15 +256,17 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/oschwald/maxminddb-golang v1.10.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/exporter-toolkit v0.8.2 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rs/xid v1.4.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -280,22 +289,20 @@ require ( go.opentelemetry.io/otel/trace v1.11.2 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect golang.org/x/mod v0.7.0 // indirect - golang.org/x/term v0.4.0 // indirect - golang.org/x/tools v0.4.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/tools v0.5.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2 // indirect google.golang.org/protobuf v1.28.1 // indirect 
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - k8s.io/api v0.26.0 // indirect - k8s.io/apimachinery v0.26.0 // indirect - k8s.io/client-go v0.26.0 // indirect + k8s.io/api v0.26.1 // indirect + k8s.io/apimachinery v0.26.1 // indirect + k8s.io/client-go v0.26.1 // indirect k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect diff --git a/go.sum b/go.sum index db37ee3c5521..4748da98efd5 100644 --- a/go.sum +++ b/go.sum @@ -86,6 +86,14 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -105,8 +113,8 @@ github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= @@ -135,6 +143,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ 
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= +github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -198,6 +208,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGn github.com/alicebob/miniredis/v2 v2.30.0 h1:uA3uhDbCxfO9+DI/DuGeAMr9qI+noVWwGPNTFuKID5M= github.com/alicebob/miniredis/v2 v2.30.0/go.mod h1:84TWKZlxYkfgMucPBf5SOQBYJceZeQRFIaQgNMiCX6Q= github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM= +github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -253,6 +264,7 @@ github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnw github.com/axiomhq/hyperloglog v0.0.0-20180317131949-fe9507de0228/go.mod h1:IOXAcuKIFq/mDyuQ4wyJuJ79XLMsmLM+5RdQ+vWrL7o= github.com/baidubce/bce-sdk-go v0.9.141 h1:EV5BH5lfymIGPSmYDo9xYdsVlvWAW6nFeiA6t929zBE= github.com/baidubce/bce-sdk-go v0.9.141/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -359,17 +371,18 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/digitalocean/godo v1.91.1 h1:1o30VOCu1aC6488qBd0SkQiBeAZ35RSTvLwCA1pQMhc= -github.com/digitalocean/godo v1.91.1/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg= +github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= 
github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= -github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= +github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -389,7 +402,6 @@ github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= @@ -403,11 +415,9 @@ github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7j github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/e2e v0.12.2-0.20220714084440-2f5240d8c363 h1:Nw7SeBNMBrX3s0BbDlAWuGhEEDcKLteMsMmPThj4sxQ= -github.com/efficientgo/e2e v0.12.2-0.20220714084440-2f5240d8c363/go.mod h1:0Jrqcog5+GlJkbC8ulPkgyRZwq+GsvjUlNt+B2swzJ8= -github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= -github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= -github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I= +github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps= +github.com/efficientgo/e2e v0.13.1-0.20220922081603-45de9fc588a8 h1:UFLc39BcUXahSNCLUrKjNGZABMUZaS4M74EZvTRnq3k= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -570,8 +580,8 @@ github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBT github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -590,8 +600,9 @@ github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= +github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -693,8 +704,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93 h1:D5iJJZKAi0rU4e/5E58BkrnN+xeCDjAIqcm1GGxAGSI= -github.com/google/pprof v0.0.0-20221212185716-aee1124e3a93/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc= +github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= @@ -843,7 +854,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= 
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= -github.com/hashicorp/nomad/api v0.0.0-20221214074818-7dbbf6bc584d h1:kEWrUx7mld3c6HRcO2KhfD1MYBkofuZfEfDwCRQ9aMU= +github.com/hashicorp/nomad/api v0.0.0-20230124213148-69fd1a0e4bf7 h1:XOdd3JHyeQnBRxotBo9ibxBFiYGuYhQU25s/YeV2cTU= github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= @@ -857,7 +868,7 @@ github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKe github.com/heroku/rollrus v0.2.0/go.mod h1:B3MwEcr9nmf4xj0Sr5l9eSht7wLKMa1C+9ajgAU79ek= github.com/heroku/x v0.0.55 h1:LSXseirdcQaVobauVkRLbN1VnxVmRQgRABrDA1Cz2Q8= github.com/heroku/x v0.0.55/go.mod h1:YZxbWdDeSewnf/CDZM2UXCZZPU9JZfObfms5FT3P8NA= -github.com/hetznercloud/hcloud-go v1.38.0 h1:K6Pd/mMdcLfBhvwG39qyAaacp4pCS3dKa8gChmLKxLg= +github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -953,8 +964,8 @@ github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7y github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.14 h1:QRqdp6bb9M9S5yyKeYteXKuoKE4p0tGlra81fKOpWH8= -github.com/klauspost/cpuid/v2 v2.0.14/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= +github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= @@ -972,6 +983,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leesper/go_rng v0.0.0-20171009123644-5344a9259b21/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= @@ -981,7 +993,7 @@ github.com/lib/pq v0.0.0-20180523175426-90697d60dd84/go.mod h1:5WUZQaWbwv1U+lTRe github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo 
v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ= +github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA= github.com/lstoll/grpce v1.7.0/go.mod h1:XiCWl3R+avNCT7KsTjv3qCblgsSqd0SC4ymySrH226g= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-validate v0.0.0-20180911180927-64fcb82c878e/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1029,8 +1041,8 @@ github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7Xn github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb h1:J7jRWqlD+K3Tp4YbLWcyBKiHoNRy49JR5HA4RetFrAY= -github.com/minio/minio-go/v7 v7.0.32-0.20220706200439-ef3e45ed9cdb/go.mod h1:/sjRKkKIA75CKh1iu8E3qBy7ktBmCCDGII0zbXGwbUk= +github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs= +github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -1141,7 +1153,12 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/oracle/oci-go-sdk/v65 v65.13.0 h1:0+9ea5goYfhI3/MPfbIQU6yzHYWE6sCk6VuUepxk5Nk= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/oschwald/geoip2-golang v1.8.0 h1:KfjYB8ojCEn/QLqsDU0AzrJ3R5Qa9vFlx3z6SLNcKTs= +github.com/oschwald/geoip2-golang v1.8.0/go.mod h1:R7bRvYjOeaoenAp9sKRS8GX5bJWcZ0laWO5+DauEktw= +github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd9181uj2MQ5Vndg= +github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0= github.com/ovh/go-ovh v1.3.0 h1:mvZaddk4E4kLcXhzb+cxBsMPYp2pHqiQpWYkInsuZPQ= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= @@ -1159,6 +1176,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod 
h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.2-0.20190227000051-27936f6d90f9/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1212,7 +1231,6 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.36.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= @@ -1231,10 +1249,11 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= -github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI= +github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20160613154715-cfa5a85e9f0a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1260,8 +1279,9 @@ github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WS github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.12 h1:Aaz4T7dZp7cB2cv7D/tGtRdSMh48sRaDYr7Jh0HV4qQ= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= 
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= @@ -1287,8 +1307,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= @@ -1341,10 +1361,10 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/tencentyun/cos-go-sdk-v5 v0.7.34 h1:xm+Pg+6m486y4eugRI7/E4WasbVmpY1hp9QBSRErgp8= +github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= -github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e h1:IhC7gP1u/uA+yf9RYwhRVBq+2+HV1xRGcrY/C6WBaPY= -github.com/thanos-io/objstore v0.0.0-20220715165016-ce338803bc1e/go.mod h1:Fp62HaCG8R+5ak2g6+foU/Jag9JhtmpftVpubyS3S5s= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204 h1:W4w5Iph7j32Sf1QFWLJDCqvO0WgZS0jHGID+qnq3wV0= +github.com/thanos-io/objstore v0.0.0-20230201072718-11ffbc490204/go.mod h1:STSgpY8M6EKF2G/raUFdbIMf2U9GgYlEjAEHJxjvpAo= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= @@ -1478,9 +1498,7 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1494,11 +1512,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1541,8 +1556,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= -golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874 h1:kWC3b7j6Fu09SnEBr7P4PuQyM0R6sqyH9R+EjIvT1nQ= +golang.org/x/exp v0.0.0-20230124195608-d38c7dcee874/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1650,8 +1665,8 @@ golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1775,6 +1790,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1798,20 +1814,22 @@ golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1824,8 +1842,8 @@ golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1862,7 +1880,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1907,8 +1924,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2068,8 +2085,8 @@ google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto 
v0.0.0-20230124163310-31e0e69b6fc2 h1:O97sLx/Xmb/KIZHB/2/BzofxBs5QmmR0LcihPtllmbc= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2206,19 +2223,17 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= -inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20190325185214-7544f9db76f6/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= +k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= +k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= +k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= +k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= +k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= +k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go index e9b4da0a5ba0..20c16fc121bd 100644 --- a/integration/loki_micro_services_delete_test.go +++ b/integration/loki_micro_services_delete_test.go @@ -247,14 +247,18 @@ func TestMicroServicesDeleteRequest(t *testing.T) { require.Eventually(t, func() bool { deleteRequests, err := cliCompactor.GetDeleteRequests() require.NoError(t, err) - require.Len(t, deleteRequests, len(expectedDeleteRequests)) + + outer: for i := range deleteRequests { - if deleteRequests[i] != expectedDeleteRequests[i] { - return false + for j := range expectedDeleteRequests { + if deleteRequests[i] == expectedDeleteRequests[j] { + continue outer + } } + return false } return true - }, 10*time.Second, 
1*time.Second) + }, 20*time.Second, 1*time.Second) // Check metrics metrics, err := cliCompactor.Metrics() diff --git a/integration/shared_test.go b/integration/shared_test.go index 53fb7de15722..61b469ecd004 100644 --- a/integration/shared_test.go +++ b/integration/shared_test.go @@ -5,8 +5,10 @@ import ( "time" ) +var randomGenerator *rand.Rand + func init() { - rand.Seed(time.Now().UnixNano()) + randomGenerator = rand.New(rand.NewSource(time.Now().UnixNano())) } var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") @@ -14,7 +16,7 @@ var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randStringRunes() string { b := make([]rune, 12) for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] + b[i] = letterRunes[randomGenerator.Intn(len(letterRunes))] } return string(b) } diff --git a/loki-build-image/Dockerfile b/loki-build-image/Dockerfile index 841ae7b5c768..0e258c6dccf9 100644 --- a/loki-build-image/Dockerfile +++ b/loki-build-image/Dockerfile @@ -5,7 +5,7 @@ # See ../docs/sources/maintaining/release-loki-build-image.md # Install helm (https://helm.sh/) and helm-docs (https://github.com/norwoodj/helm-docs) for generating Helm Chart reference. -FROM golang:1.19.5 as helm +FROM golang:1.20.1 as helm ARG HELM_VER="v3.2.3" RUN curl -L -o /tmp/helm-$HELM_VER.tgz https://get.helm.sh/helm-${HELM_VER}-linux-amd64.tar.gz && \ tar -xz -C /tmp -f /tmp/helm-$HELM_VER.tgz && \ @@ -13,7 +13,7 @@ RUN curl -L -o /tmp/helm-$HELM_VER.tgz https://get.helm.sh/helm-${HELM_VER}-linu rm -rf /tmp/linux-amd64 /tmp/helm-$HELM_VER.tgz RUN GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0 -FROM alpine:3.16.2 as lychee +FROM alpine:3.16.4 as lychee ARG LYCHEE_VER="0.7.0" RUN apk add --no-cache curl && \ curl -L -o /tmp/lychee-$LYCHEE_VER.tgz https://github.com/lycheeverse/lychee/releases/download/${LYCHEE_VER}/lychee-${LYCHEE_VER}-x86_64-unknown-linux-gnu.tar.gz && \ @@ -21,24 +21,24 @@ RUN apk add --no-cache curl && \ mv /tmp/lychee /usr/bin/lychee && \ rm -rf /tmp/linux-amd64 /tmp/lychee-$LYCHEE_VER.tgz -FROM alpine:3.16.2 as golangci +FROM alpine:3.16.4 as golangci RUN apk add --no-cache curl && \ cd / && \ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.50.0 + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.51.2 -FROM alpine:3.16.2 as buf +FROM alpine:3.16.4 as buf RUN apk add --no-cache curl && \ curl -sSL "https://github.com/bufbuild/buf/releases/download/v1.4.0/buf-$(uname -s)-$(uname -m)" -o "/usr/bin/buf" && \ chmod +x "/usr/bin/buf" -FROM alpine:3.16.2 as docker +FROM alpine:3.16.4 as docker RUN apk add --no-cache docker-cli # TODO this should be fixed to download and extract the specific release binary from github as we do for golangci and helm above # however we need a commit which hasn't been released yet: https://github.com/drone/drone-cli/commit/1fad337d74ca0ecf420993d9d2d7229a1c99f054 # Read the comment below regarding GO111MODULE=on and why it is necessary -FROM golang:1.19.5 as drone +FROM golang:1.20.1 as drone RUN curl -L https://github.com/drone/drone-cli/releases/download/v1.4.0/drone_linux_amd64.tar.gz | tar zx && \ install -t /usr/local/bin drone @@ -47,32 +47,32 @@ RUN curl -L https://github.com/drone/drone-cli/releases/download/v1.4.0/drone_li # Error: # github.com/fatih/faillint@v1.5.0 requires golang.org/x/tools@v0.0.0-20200207224406-61798d64f025 # (not 
golang.org/x/tools@v0.0.0-20190918214920-58d531046acd from golang.org/x/tools/cmd/goyacc@58d531046acdc757f177387bc1725bfa79895d69) -FROM golang:1.19.5 as faillint +FROM golang:1.20.1 as faillint RUN GO111MODULE=on go install github.com/fatih/faillint@v1.11.0 -FROM golang:1.19.5 as delve +FROM golang:1.20.1 as delve RUN GO111MODULE=on go install github.com/go-delve/delve/cmd/dlv@latest # Install ghr used to push binaries and template the release # This collides with the version of go tools used in the base image, thus we install it in its own image and copy it over. -FROM golang:1.19.5 as ghr +FROM golang:1.20.1 as ghr RUN GO111MODULE=on go install github.com/tcnksm/ghr@9349474 # Install nfpm (https://nfpm.goreleaser.com) for creating .deb and .rpm packages. -FROM golang:1.19.5 as nfpm +FROM golang:1.20.1 as nfpm RUN GO111MODULE=on go install github.com/goreleaser/nfpm/v2/cmd/nfpm@v2.11.3 # Install gotestsum -FROM golang:1.19.5 as gotestsum +FROM golang:1.20.1 as gotestsum RUN GO111MODULE=on go install gotest.tools/gotestsum@v1.8.2 # Install tools used to compile jsonnet. -FROM golang:1.19.5 as jsonnet +FROM golang:1.20.1 as jsonnet RUN GO111MODULE=on go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@v0.4.0 RUN GO111MODULE=on go install github.com/monitoring-mixins/mixtool/cmd/mixtool@bca3066 RUN GO111MODULE=on go install github.com/google/go-jsonnet/cmd/jsonnet@v0.18.0 -FROM golang:1.19.5-buster +FROM golang:1.20.1-buster RUN apt-get update && \ apt-get install -qy \ musl gnupg ragel \ diff --git a/loki-build-image/version-updater.sh b/loki-build-image/version-updater.sh index 75203c34a186..f890a2ad46ec 100755 --- a/loki-build-image/version-updater.sh +++ b/loki-build-image/version-updater.sh @@ -19,7 +19,7 @@ fi echo "Updating loki-build-image references to '${VERSION}'" -find . -type f \( -name '*.yml' -o -name '*.yaml' -o -name '*Dockerfile*' \) -exec grep -lE "grafana/loki-build-image:[0-9]+" {} \; | grep -ve '.drone' | +find . 
-type f \( -name '*.yml' -o -name '*.yaml' -o -name '*Dockerfile*' -o -name '*devcontainer.json' \) -exec grep -lE "grafana/loki-build-image:[0-9]+" {} \; | grep -ve '.drone' | while read -r x; do echo "Updating ${x}" ${SED} -i -re "s,grafana/loki-build-image:[0-9]+\.[0-9]+\.[0-9]+,grafana/loki-build-image:${VERSION},g" "${x}" diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md index f4d3a91a9e55..300d5f134cc0 100644 --- a/operator/CHANGELOG.md +++ b/operator/CHANGELOG.md @@ -1,5 +1,7 @@ ## Main +- [8578](https://github.com/grafana/loki/pull/8578) **xperimental**: Refactor status update to reduce API calls +- [8577](https://github.com/grafana/loki/pull/8577) **Red-GV**: Store gateway tenant information in secret instead of configmap - [8397](https://github.com/grafana/loki/pull/8397) **periklis**: Update Loki operand to v2.7.3 - [8308](https://github.com/grafana/loki/pull/8308) **aminesnow**: operator: Cleanup ruler resources when disabled - [8336](https://github.com/grafana/loki/pull/8336) **periklis**: Update Loki operand to v2.7.2 @@ -9,6 +11,9 @@ - [8173](https://github.com/grafana/loki/pull/8173) **periklis**: Remove custom webhook cert mounts for OLM-based deployment (OpenShift) - [8001](https://github.com/grafana/loki/pull/8001) **aminesnow**: Add API validation to Alertmanager header auth config - [8087](https://github.com/grafana/loki/pull/8087) **xperimental**: Fix status not updating when state of pods changes + +## 0.1.0 (2023-01-10) + - [8068](https://github.com/grafana/loki/pull/8068) **periklis**: Use lokistack-gateway replicas from size table - [8068](https://github.com/grafana/loki/pull/8068) **periklis**: Use lokistack-gateway replicas from size table - [7839](https://github.com/grafana/loki/pull/7839) **aminesnow**: Configure Alertmanager per-tenant diff --git a/operator/Dockerfile.cross b/operator/Dockerfile.cross index 09d9cebe89d8..a06cb5f83599 100644 --- a/operator/Dockerfile.cross +++ b/operator/Dockerfile.cross @@ -1,4 +1,4 @@ -ARG BUILD_IMAGE=grafana/loki-build-image:0.27.1 +ARG BUILD_IMAGE=grafana/loki-build-image:0.28.1 FROM golang:1.19.1-alpine as goenv RUN go env GOARCH > /goarch && \ diff --git a/operator/Makefile b/operator/Makefile index e2d72b283a7b..3bba3165966a 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -10,15 +10,10 @@ WEBSITE_BASE_URL ?= https://loki-operator.dev .DEFAULT_GOAL := default default: all -# CLUSTER_LOGGING_VERSION -# defines the version of the OpenShift Cluster Logging product. -# Updates this value when a new version of the product should include this operator and its bundle. -CLUSTER_LOGGING_VERSION ?= 5.1.preview.1 - # LOKI_OPERATOR_NS # defines the default namespace of the Loki Operator in OpenShift. # Loki Operator will be installed in this namespace. -LOKI_OPERATOR_NS ?= openshift-operators-redhat +LOKI_OPERATOR_NS ?= kubernetes-operators # VERSION # defines the project version for the bundle. @@ -26,9 +21,34 @@ LOKI_OPERATOR_NS ?= openshift-operators-redhat # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= v0.0.1 -CHANNELS ?= "stable" -DEFAULT_CHANNEL ?= "stable" +VERSION ?= v0.1.0 +CHANNELS ?= "alpha" +DEFAULT_CHANNEL ?= "alpha" +SUPPORTED_OCP_VERSIONS="v4.10" + +# REGISTRY_BASE +# defines the container registry and organization for the bundle and operator container images. 
+REGISTRY_BASE_COMMUNITY = docker.io/grafana +REGISTRY_BASE_OPENSHIFT = quay.io/openshift-logging +REGISTRY_BASE ?= $(REGISTRY_BASE_COMMUNITY) + +# TODO(@periklis): Replace this image tag with VERSION once we have GH tags +MAIN_IMAGE_TAG = main-39f2856 + +# Customize for variants: community or openshift +VARIANT ?= community +ifeq ($(VARIANT), openshift) +ifeq ($(REGISTRY_BASE), $(REGISTRY_BASE_COMMUNITY)) + REGISTRY_BASE = $(REGISTRY_BASE_OPENSHIFT) +endif + CHANNELS = stable + DEFAULT_CHANNEL = stable + LOKI_OPERATOR_NS = openshift-operators-redhat + MAIN_IMAGE_TAG = $(VERSION) +endif + +# Image URL to use all building/pushing image targets +IMG ?= $(REGISTRY_BASE)/loki-operator:$(MAIN_IMAGE_TAG) # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable") @@ -49,15 +69,17 @@ BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) endif BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) -REGISTRY_ORG ?= openshift-logging - # BUNDLE_IMG defines the image:tag used for the bundle. # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) -BUNDLE_IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator-bundle:$(VERSION) +BUNDLE_IMG ?= $(REGISTRY_BASE)/loki-operator-bundle:$(VERSION) # BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(subst v,,$(VERSION)) $(BUNDLE_METADATA_OPTS) +MANIFESTS_DIR = config/manifests/$(VARIANT) +BUNDLE_DIR = ./bundle/$(VARIANT) +BUNDLE_BUILD_GEN_FLAGS ?= $(BUNDLE_GEN_FLAGS) --output-dir . --kustomize-dir ../../$(MANIFESTS_DIR) + # USE_IMAGE_DIGESTS defines if images are resolved via tags or digests # You can enable this value if you would like to use SHA Based Digests # To enable set flag to true @@ -66,13 +88,10 @@ ifeq ($(USE_IMAGE_DIGESTS), true) BUNDLE_GEN_FLAGS += --use-image-digests endif -CALCULATOR_IMG ?= quay.io/$(REGISTRY_ORG)/storage-size-calculator:latest +CALCULATOR_IMG ?= $(REGISTRY_BASE)/storage-size-calculator:latest GO_FILES := $(shell find . -type f -name '*.go') -# Image URL to use all building/pushing image targets -IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator:$(VERSION) - # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -145,8 +164,9 @@ test-unit-prometheus: $(PROMTOOL) ## Run prometheus unit tests @$(PROMTOOL) test rules ./internal/manifests/internal/alerts/testdata/test.yaml .PHONY: scorecard -scorecard: generate go-generate bundle ## Run scorecard test - $(OPERATOR_SDK) scorecard bundle +scorecard: generate go-generate bundle-all ## Run scorecard tests for all bundles (community, openshift) + $(OPERATOR_SDK) scorecard -c ./bundle/community/tests/scorecard/config.yaml bundle/community + $(OPERATOR_SDK) scorecard -c ./bundle/openshift/tests/scorecard/config.yaml bundle/openshift .PHONY: lint lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code. @@ -168,16 +188,21 @@ oci-build: ## Build the image oci-push: ## Push the image $(OCI_RUNTIME) push ${IMG} -.PHONY: bundle ## Generate bundle manifests and metadata, then validate generated files. -bundle: manifests $(KUSTOMIZE) $(OPERATOR_SDK) - $(OPERATOR_SDK) generate kustomize manifests -q +.PHONY: bundle-all +bundle-all: ## Generate both bundles. 
+ $(MAKE) bundle + $(MAKE) bundle VARIANT=openshift + +.PHONY: bundle +bundle: manifests $(KUSTOMIZE) $(OPERATOR_SDK) ## Generate variant bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q --input-dir $(MANIFESTS_DIR) --output-dir $(MANIFESTS_DIR) cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) - $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) - $(OPERATOR_SDK) bundle validate ./bundle + cd $(BUNDLE_DIR) && cp ../../PROJECT . && $(KUSTOMIZE) build ../../$(MANIFESTS_DIR) | $(OPERATOR_SDK) generate bundle $(BUNDLE_BUILD_GEN_FLAGS) && rm PROJECT + $(OPERATOR_SDK) bundle validate $(BUNDLE_DIR) .PHONY: bundle-build -bundle-build: ## Build the bundle image. - $(OCI_RUNTIME) build -f bundle.Dockerfile -t $(BUNDLE_IMG) . +bundle-build: ## Build the community bundle image + $(OCI_RUNTIME) build -f $(BUNDLE_DIR)/bundle.Dockerfile -t $(BUNDLE_IMG) $(BUNDLE_DIR) ##@ Deployment @@ -231,7 +256,7 @@ olm-deploy-operator: oci-build oci-push .PHONY: olm-deploy ifeq ($(or $(findstring openshift-logging,$(IMG)),$(findstring openshift-logging,$(BUNDLE_IMG))),openshift-logging) olm-deploy: ## Deploy the operator bundle and the operator via OLM into an Kubernetes cluster selected via KUBECONFIG. - $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development) + $(error Set variable REGISTRY_BASE to use a custom container registry org account for local development) else olm-deploy: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK) $(OPERATOR_SDK) run bundle -n $(LOKI_OPERATOR_NS) --install-mode AllNamespaces $(BUNDLE_IMG) @@ -240,7 +265,7 @@ endif .PHONY: olm-upgrade ifeq ($(or $(findstring openshift-logging,$(IMG)),$(findstring openshift-logging,$(BUNDLE_IMG))),openshift-logging) olm-upgrade: ## Upgrade the operator bundle and the operator via OLM into an Kubernetes cluster selected via KUBECONFIG. - $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development) + $(error Set variable REGISTRY_BASE to use a custom container registry org account for local development) else olm-upgrade: olm-deploy-bundle olm-deploy-operator $(OPERATOR_SDK) $(OPERATOR_SDK) run bundle-upgrade -n $(LOKI_OPERATOR_NS) $(BUNDLE_IMG) @@ -253,7 +278,7 @@ olm-undeploy: $(OPERATOR_SDK) ## Cleanup deployments of the operator bundle and .PHONY: deploy-size-calculator ifeq ($(findstring openshift-logging,$(CALCULATOR_IMG)),openshift-logging) deploy-size-calculator: ## Deploy storage size calculator (OpenShift only!) - $(error Set variable REGISTRY_ORG to use a custom container registry org account for local development) + $(error Set variable REGISTRY_BASE to use a custom container registry org account for local development) else deploy-size-calculator: $(KUSTOMIZE) ## Deploy storage size calculator (OpenShift only!) 
kubectl apply -f config/overlays/openshift/size-calculator/cluster_monitoring_config.yaml @@ -305,3 +330,13 @@ web: $(HUGO) | web-pre ## Run production build of the loki-operator.dev website .PHONY: web-serve web-serve: $(HUGO) | web-pre ## Run local preview version of the loki-operator.dev website @cd $(WEBSITE_DIR) && $(HUGO) serve + +.PHONY: operatorhub +operatorhub: check-operatorhub-pr-template + SUPPORTED_OCP_VERSIONS="$(SUPPORTED_OCP_VERSIONS)" ./hack/operatorhub.sh + +.PHONY: check-operatorhub-pr-template +check-operatorhub-pr-template: + curl https://raw.githubusercontent.com/operator-framework/community-operators/master/docs/pull_request_template.md -o hack/.operatorhub-pr-template.md -s > /dev/null 2>&1 + git diff -s --exit-code hack/.operatorhub-pr-template.md || (echo "Build failed: the PR template for OperatorHub has changed. Sync it and try again." && exit 1) + diff --git a/operator/bundle/community/bundle.Dockerfile b/operator/bundle/community/bundle.Dockerfile new file mode 100644 index 000000000000..d83b40681db8 --- /dev/null +++ b/operator/bundle/community/bundle.Dockerfile @@ -0,0 +1,21 @@ +FROM scratch + +# Core bundle labels. +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=loki-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.bundle.channel.default.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-unknown +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files to locations specified by labels. 
+COPY ./manifests /manifests/ +COPY ./metadata /metadata/ +COPY ./tests/scorecard /tests/scorecard/ diff --git a/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 000000000000..513f11f753a0 --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + name: loki-operator-controller-manager +status: + loadBalancer: {} diff --git a/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml new file mode 100644 index 000000000000..15db38e84bae --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator-manager-config_v1_configmap.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +data: + controller_manager_config.yaml: | + apiVersion: config.loki.grafana.com/v1 + kind: ProjectConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + leaderElection: + leaderElect: false + resourceName: e3716011.grafana.com + featureGates: + lokiStackGateway: true + runtimeSeccompProfile: false + # + # Webhook feature gates + # + lokiStackWebhook: true + alertingRuleWebhook: true + recordingRuleWebhook: true +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-manager-config diff --git a/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 000000000000..e616349aee5a --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml new file mode 100644 index 000000000000..82424b1a36e1 --- /dev/null +++ 
b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml @@ -0,0 +1,25 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-prometheus +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch diff --git a/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml new file mode 100644 index 000000000000..fa95c2ba3f2a --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml @@ -0,0 +1,22 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: loki-operator-prometheus +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring diff --git a/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml new file mode 100644 index 000000000000..1dcaabf33cdc --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator-webhook-service_v1_service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: loki-operator-webhook-service +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator +status: + loadBalancer: {} diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml new file mode 100644 index 000000000000..aefba85f89b6 --- /dev/null +++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml @@ -0,0 +1,1666 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "loki.grafana.com/v1", + "kind": "LokiStack", + "metadata": { + "name": "lokistack-sample" + }, + "spec": { + "size": "1x.small", + "storage": { + "secret": { + "name": "test" + } + }, + "storageClassName": "standard" + } + }, + { + "apiVersion": 
"loki.grafana.com/v1beta1", + "kind": "AlertingRule", + "metadata": { + "name": "alertingrule-sample" + }, + "spec": { + "groups": [ + { + "interval": "10m", + "name": "alerting-rules-group", + "rules": [ + { + "alert": "HighPercentageError", + "annotations": { + "summary": "High request latency" + }, + "expr": "sum(rate({app=\"foo\", env=\"production\"} |= \"error\" [5m])) by (job)\n /\nsum(rate({app=\"foo\", env=\"production\"}[5m])) by (job)\n \u003e 0.05\n", + "for": "10m", + "labels": { + "severity": "page" + } + }, + { + "alert": "HttpCredentialsLeaked", + "annotations": { + "message": "{{ $labels.job }} is leaking http basic auth credentials." + }, + "expr": "sum by (cluster, job, pod) (count_over_time({namespace=\"prod\"} |~ \"http(s?)://(\\\\w+):(\\\\w+)@\" [5m]) \u003e 0)", + "for": "10m", + "labels": { + "severity": "critical" + } + } + ] + } + ], + "tenantID": "test-tenant" + } + }, + { + "apiVersion": "loki.grafana.com/v1beta1", + "kind": "RecordingRule", + "metadata": { + "name": "recordingrule-sample" + }, + "spec": { + "groups": [ + { + "interval": "10m", + "name": "recording-rules-group", + "rules": [ + { + "expr": "sum(rate({container=\"myservice\"}[10m]))\n", + "record": "myservice:requests:rate10m" + }, + { + "expr": "sum(rate({container=\"otherservice\"}[1m]))\n", + "record": "otherservice:requests:rate1m" + } + ] + } + ], + "tenantID": "test-tenant" + } + }, + { + "apiVersion": "loki.grafana.com/v1beta1", + "kind": "RulerConfig", + "metadata": { + "name": "rulerconfig-sample" + }, + "spec": { + "alertmanager": { + "discovery": { + "enableSRV": true, + "refreshInterval": "1m" + }, + "enableV2": true, + "endpoints": [ + "http://alertmanager-host1.mycompany.org", + "http://alertmanager-host2.mycompany.org" + ], + "externalLabels": { + "environment": "production", + "region": "us-east-2" + }, + "externalUrl": "http://www.mycompany.org/alerts", + "notificationQueue": { + "capacity": 1000, + "forGracePeriod": "10m", + "forOutageTolerance": "1h", + "resendDelay": "1m", + "timeout": "30s" + } + }, + "evaluationInterval": "1m", + "pollInterval": "1m", + "remoteWrite": { + "client": { + "authorization": "basic", + "authorizationSecretName": "my-secret-resource", + "name": "remote-write-log-metrics", + "proxyUrl": "http://proxy-host.mycompany.org", + "relabelConfigs": [ + { + "action": "replace", + "regex": "ALERTS.*", + "replacement": "$1", + "separator": "", + "sourceLabels": [ + "labelc", + "labeld" + ], + "targetLabel": "labelnew" + } + ], + "timeout": "30s", + "url": "http://remote-write-host.mycompany.org" + }, + "enabled": true, + "refreshPeriod": "10s" + } + } + } + ] + capabilities: Full Lifecycle + categories: OpenShift Optional, Logging & Tracing + certified: "false" + containerImage: docker.io/grafana/loki-operator:main-39f2856 + createdAt: "2022-12-22T13:28:40+00:00" + description: The Community Loki Operator provides Kubernetes native deployment + and management of Loki and related logging components. 
+ operators.operatorframework.io/builder: operator-sdk-unknown + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + repository: https://github.com/grafana/loki/tree/main/operator + support: Grafana Loki SIG Operator + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.arm64: supported + name: loki-operator.v0.1.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: AlertingRule is the Schema for the alertingrules API + displayName: AlertingRule + kind: AlertingRule + name: alertingrules.loki.grafana.com + resources: + - kind: LokiStack + name: "" + version: v1 + specDescriptors: + - description: List of groups for alerting rules. + displayName: Groups + path: groups + - description: Interval defines the time interval between evaluation of the + given alerting rule. + displayName: Evaluation Interval + path: groups[0].interval + - description: Limit defines the number of alerts an alerting rule can produce. + 0 is no limit. + displayName: Limit of firing alerts + path: groups[0].limit + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Name of the alerting rule group. Must be unique within all alerting + rules. + displayName: Name + path: groups[0].name + - description: Rules defines a list of alerting rules + displayName: Rules + path: groups[0].rules + - description: The name of the alert. Must be a valid label value. + displayName: Name + path: groups[0].rules[0].alert + - description: Annotations to add to each alert. + displayName: Annotations + path: groups[0].rules[0].annotations + - description: The LogQL expression to evaluate. Every evaluation cycle this + is evaluated at the current time, and all resultant time series become pending/firing + alerts. + displayName: LogQL Expression + path: groups[0].rules[0].expr + - description: Alerts are considered firing once they have been returned for + this long. Alerts which have not yet fired for long enough are considered + pending. + displayName: Firing Threshold + path: groups[0].rules[0].for + - description: Labels to add to each alert. + displayName: Labels + path: groups[0].rules[0].labels + - description: TenantID of tenant where the alerting rules are evaluated in. + displayName: Tenant ID + path: tenantID + statusDescriptors: + - description: Conditions of the AlertingRule generation health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + - description: LokiStack is the Schema for the lokistacks API + displayName: LokiStack + kind: LokiStack + name: lokistacks.loki.grafana.com + resources: + - kind: ConfigMap + name: "" + version: v1 + - kind: Deployment + name: "" + version: v1 + - kind: Ingress + name: "" + version: v1 + - kind: PersistentVolumeClaims + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Service + name: "" + version: v1 + - kind: ServiceAccount + name: "" + version: v1 + - kind: ServiceMonitor + name: "" + version: v1 + - kind: StatefulSet + name: "" + version: v1 + specDescriptors: + - description: Limits defines the limits to be applied to log stream processing. + displayName: Rate Limiting + path: limits + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Global defines the limits applied globally across the cluster. 
+ displayName: Global Limits + path: limits.global + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.global.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.global.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.global.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.global.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.global.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.global.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.global.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.global.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.global.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.global.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Timeout when querying ingesters or storage during the execution + of a query request. + displayName: Query Timeout + path: limits.global.queries.queryTimeout + - description: Tenants defines the limits applied per tenant. + displayName: Limits per Tenant + path: limits.tenants + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.tenants.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. 
+ displayName: Ingestion Rate (in MB) + path: limits.tenants.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.tenants.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.tenants.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.tenants.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.tenants.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.tenants.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.tenants.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.tenants.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.tenants.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Timeout when querying ingesters or storage during the execution + of a query request. + displayName: Query Timeout + path: limits.tenants.queries.queryTimeout + - description: ManagementState defines if the CR should be managed by the operator + or not. Default is managed. + displayName: Management State + path: managementState + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Managed + - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged + - description: Proxy defines the spec for the object proxy to configure cluster + proxy information. + displayName: Cluster Proxy + path: proxy + - description: HTTPProxy configures the HTTP_PROXY/http_proxy env variable. + displayName: HTTPProxy + path: proxy.httpProxy + - description: HTTPSProxy configures the HTTPS_PROXY/https_proxy env variable. + displayName: HTTPSProxy + path: proxy.httpsProxy + - description: NoProxy configures the NO_PROXY/no_proxy env variable. + displayName: NoProxy + path: proxy.noProxy + - description: ReplicationFactor defines the policy for log stream replication. 
+ displayName: Replication Factor + path: replicationFactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Rules defines the spec for the ruler component + displayName: Rules + path: rules + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Enabled defines a flag to enable/disable the ruler component + displayName: Enable + path: rules.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Namespaces to be selected for PrometheusRules discovery. If unspecified, + only the same namespace as the LokiStack object is in is used. + displayName: Namespace Selector + path: rules.namespaceSelector + - description: A selector to select which LokiRules to mount for loading alerting/recording + rules from. + displayName: Selector + path: rules.selector + - description: Size defines one of the support Loki deployment scale out sizes. + displayName: LokiStack Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small + - urn:alm:descriptor:com.tectonic.ui:select:1x.small + - urn:alm:descriptor:com.tectonic.ui:select:1x.medium + - description: Storage defines the spec for the object storage endpoint to store + logs. + displayName: Object Storage + path: storage + - description: Version for writing and reading logs. + displayName: Version + path: storage.schemas[0].version + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:v11 + - urn:alm:descriptor:com.tectonic.ui:select:v12 + - description: Name of a secret in the namespace configured for object storage + secrets. + displayName: Object Storage Secret Name + path: storage.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Type of object storage that should be used + displayName: Object Storage Secret Type + path: storage.secret.type + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:azure + - urn:alm:descriptor:com.tectonic.ui:select:gcs + - urn:alm:descriptor:com.tectonic.ui:select:s3 + - urn:alm:descriptor:com.tectonic.ui:select:swift + - description: TLS configuration for reaching the object storage endpoint. + displayName: TLS Config + path: storage.tls + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: storage.tls.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: storage.tls.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap + - description: Storage class name defines the storage class for ingester/querier + PVCs. + displayName: Storage Class Name + path: storageClassName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:StorageClass + - description: Template defines the resource/limits/tolerations/nodeselectors + per component + displayName: Node Placement + path: template + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Compactor defines the compaction component spec. + displayName: Compactor pods + path: template.compactor + - description: Replicas defines the number of replica pods of the component. 
+ displayName: Replicas + path: template.compactor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Distributor defines the distributor component spec. + displayName: Distributor pods + path: template.distributor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.distributor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Gateway defines the lokistack gateway component spec. + displayName: Gateway pods + path: template.gateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.gateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: IndexGateway defines the index gateway component spec. + displayName: Index Gateway pods + path: template.indexGateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.indexGateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ingester defines the ingester component spec. + displayName: Ingester pods + path: template.ingester + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ingester.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Querier defines the querier component spec. + displayName: Querier pods + path: template.querier + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.querier.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: QueryFrontend defines the query frontend component spec. + displayName: Query Frontend pods + path: template.queryFrontend + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.queryFrontend.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ruler defines the ruler component spec. + displayName: Ruler pods + path: template.ruler + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ruler.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + displayName: Tenants Configuration + path: tenants + - description: Authentication defines the lokistack-gateway component authentication + configuration spec per tenant. + displayName: Authentication + path: tenants.authentication + - description: OIDC defines the spec for the OIDC tenant's authentication. + displayName: OIDC Configuration + path: tenants.authentication[0].oidc + - description: IssuerURL defines the URL for issuer. + displayName: Issuer URL + path: tenants.authentication[0].oidc.issuerURL + - description: RedirectURL defines the URL for redirect. + displayName: Redirect URL + path: tenants.authentication[0].oidc.redirectURL + - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath + for tenant's authentication. + displayName: Tenant Secret + path: tenants.authentication[0].oidc.secret + - description: Name of a secret in the namespace configured for tenant secrets. 
+ displayName: Tenant Secret Name + path: tenants.authentication[0].oidc.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: TenantID defines the id of the tenant. + displayName: Tenant ID + path: tenants.authentication[0].tenantId + - description: TenantName defines the name of the tenant. + displayName: Tenant Name + path: tenants.authentication[0].tenantName + - description: Authorization defines the lokistack-gateway component authorization + configuration spec per tenant. + displayName: Authorization + path: tenants.authorization + - description: OPA defines the spec for the third-party endpoint for tenant's + authorization. + displayName: OPA Configuration + path: tenants.authorization.opa + - description: URL defines the third-party endpoint for authorization. + displayName: OpenPolicyAgent URL + path: tenants.authorization.opa.url + - description: RoleBindings defines configuration to bind a set of roles to + a set of subjects. + displayName: Static Role Bindings + path: tenants.authorization.roleBindings + - description: Roles defines a set of permissions to interact with a tenant. + displayName: Static Roles + path: tenants.authorization.roles + - description: Mode defines the mode in which lokistack-gateway component will + be configured. + displayName: Mode + path: tenants.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:static + - urn:alm:descriptor:com.tectonic.ui:select:dynamic + - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging + - urn:alm:descriptor:com.tectonic.ui:select:openshift-network + statusDescriptors: + - description: Distributor is a map to the per pod status of the distributor + deployment + displayName: Distributor + path: components.distributor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ingester is a map to the per pod status of the ingester statefulset + displayName: Ingester + path: components.ingester + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Querier is a map to the per pod status of the querier deployment + displayName: Querier + path: components.querier + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: QueryFrontend is a map to the per pod status of the query frontend + deployment + displayName: Query Frontend + path: components.queryFrontend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Compactor is a map to the pod status of the compactor pod. + displayName: Compactor + path: components.compactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Gateway is a map to the per pod status of the lokistack gateway + deployment. + displayName: Gateway + path: components.gateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: IndexGateway is a map to the per pod status of the index gateway + statefulset + displayName: IndexGateway + path: components.indexGateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ruler is a map to the per pod status of the lokistack ruler statefulset. + displayName: Ruler + path: components.ruler + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Conditions of the Loki deployment health. 
+ displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1 + - description: RecordingRule is the Schema for the recordingrules API + displayName: RecordingRule + kind: RecordingRule + name: recordingrules.loki.grafana.com + resources: + - kind: LokiStack + name: "" + version: v1 + specDescriptors: + - description: List of groups for recording rules. + displayName: Groups + path: groups + - description: Interval defines the time interval between evaluation of the + given recording rule. + displayName: Evaluation Interval + path: groups[0].interval + - description: Limit defines the number of series a recording rule can produce. + 0 is no limit. + displayName: Limit of produced series + path: groups[0].limit + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Name of the recording rule group. Must be unique within all recording + rules. + displayName: Name + path: groups[0].name + - description: Rules defines a list of recording rules + displayName: Rules + path: groups[0].rules + - description: The LogQL expression to evaluate. Every evaluation cycle this + is evaluated at the current time, and all resultant time series become pending/firing + alerts. + displayName: LogQL Expression + path: groups[0].rules[0].expr + - description: The name of the time series to output to. Must be a valid metric + name. + displayName: Metric Name + path: groups[0].rules[0].record + - description: TenantID of tenant where the recording rules are evaluated in. + displayName: Tenant ID + path: tenantID + statusDescriptors: + - description: Conditions of the RecordingRule generation health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + - description: RulerConfig is the Schema for the rulerconfigs API + displayName: RulerConfig + kind: RulerConfig + name: rulerconfigs.loki.grafana.com + resources: + - kind: LokiStack + name: "" + version: v1 + specDescriptors: + - description: Defines alert manager configuration to notify on firing alerts. + displayName: Alert Manager Configuration + path: alertmanager + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Client configuration for reaching the alertmanager endpoint. + displayName: TLS Config + path: alertmanager.client + - description: Basic authentication configuration for reaching the alertmanager + endpoints. + displayName: Basic Authentication + path: alertmanager.client.basicAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The subject's password for the basic authentication configuration. + displayName: Password + path: alertmanager.client.basicAuth.password + - description: The subject's username for the basic authentication configuration. + displayName: Username + path: alertmanager.client.basicAuth.username + - description: Header authentication configuration for reaching the alertmanager + endpoints. + displayName: Header Authentication + path: alertmanager.client.headerAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The credentials for the header authentication configuration. + displayName: Credentials + path: alertmanager.client.headerAuth.credentials + - description: The credentials file for the Header authentication configuration. + It is mutually exclusive with `credentials`.
+ displayName: Credentials File + path: alertmanager.client.headerAuth.credentialsFile + - description: The authentication type for the header authentication configuration. + displayName: Type + path: alertmanager.client.headerAuth.type + - description: TLS configuration for reaching the alertmanager endpoints. + displayName: TLS + path: alertmanager.client.tls + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The CA certificate file path for the TLS configuration. + displayName: CA Path + path: alertmanager.client.tls.caPath + - description: The client-side certificate file path for the TLS configuration. + displayName: Cert Path + path: alertmanager.client.tls.certPath + - description: The client-side key file path for the TLS configuration. + displayName: Key Path + path: alertmanager.client.tls.keyPath + - description: The server name to validate in the alertmanager server certificates. + displayName: Server Name + path: alertmanager.client.tls.serverName + - description: Defines the configuration for DNS-based discovery of AlertManager + hosts. + displayName: DNS Discovery + path: alertmanager.discovery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Use DNS SRV records to discover Alertmanager hosts. + displayName: Enable SRV + path: alertmanager.discovery.enableSRV + - description: How long to wait between refreshing DNS resolutions of Alertmanager + hosts. + displayName: Refresh Interval + path: alertmanager.discovery.refreshInterval + - description: If enabled, then requests to Alertmanager use the v2 API. + displayName: Enable AlertManager V2 API + path: alertmanager.enableV2 + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: List of AlertManager URLs to send notifications to. Each Alertmanager + URL is treated as a separate group in the configuration. Multiple Alertmanagers + in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery). + displayName: AlertManager Endpoints + path: alertmanager.endpoints + - description: Additional labels to add to all alerts. + displayName: Extra Alert Labels + path: alertmanager.externalLabels + - description: URL for alerts return path. + displayName: Alert External URL + path: alertmanager.externalUrl + - description: Defines the configuration for the notification queue to AlertManager + hosts. + displayName: Notification Queue + path: alertmanager.notificationQueue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Capacity of the queue for notifications to be sent to the Alertmanager. + displayName: Notification Queue Capacity + path: alertmanager.notificationQueue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum duration between alert and restored "for" state. This + is maintained only for alerts with configured "for" time greater than the + grace period. + displayName: Firing Grace Period + path: alertmanager.notificationQueue.forGracePeriod + - description: Max time to tolerate outage for restoring "for" state of alert. + displayName: Outage Tolerance + path: alertmanager.notificationQueue.forOutageTolerance + - description: Minimum amount of time to wait before resending an alert to Alertmanager. + displayName: Resend Delay + path: alertmanager.notificationQueue.resendDelay + - description: HTTP timeout duration when sending notifications to the Alertmanager. 
+ displayName: Timeout + path: alertmanager.notificationQueue.timeout + - description: List of alert relabel configurations. + displayName: Alert Relabel Configuration + path: alertmanager.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: alertmanager.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values. + displayName: Modulus + path: alertmanager.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. + Default is '(.*)' + displayName: Regex + path: alertmanager.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: alertmanager.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. + displayName: Separator + path: alertmanager.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: alertmanager.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: alertmanager.relabelConfigs[0].targetLabel + - description: Interval on how frequently to evaluate rules. + displayName: Evaluation Interval + path: evaluationInterval + - description: Overrides defines the config overrides to be applied per-tenant. + displayName: Rate Limiting + path: overrides + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Client configuration for reaching the alertmanager endpoint. + displayName: TLS Config + path: overrides.alertmanager.client + - description: Basic authentication configuration for reaching the alertmanager + endpoints. + displayName: Basic Authentication + path: overrides.alertmanager.client.basicAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The subject's password for the basic authentication configuration. + displayName: Password + path: overrides.alertmanager.client.basicAuth.password + - description: The subject's username for the basic authentication configuration. + displayName: Username + path: overrides.alertmanager.client.basicAuth.username + - description: Header authentication configuration for reaching the alertmanager + endpoints. + displayName: Header Authentication + path: overrides.alertmanager.client.headerAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The credentials for the header authentication configuration. + displayName: Credentials + path: overrides.alertmanager.client.headerAuth.credentials + - description: The credentials file for the Header authentication configuration. 
+ It is mutually exclusive with `credentials`. + displayName: Credentials File + path: overrides.alertmanager.client.headerAuth.credentialsFile + - description: The authentication type for the header authentication configuration. + displayName: Type + path: overrides.alertmanager.client.headerAuth.type + - description: TLS configuration for reaching the alertmanager endpoints. + displayName: TLS + path: overrides.alertmanager.client.tls + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The CA certificate file path for the TLS configuration. + displayName: CA Path + path: overrides.alertmanager.client.tls.caPath + - description: The client-side certificate file path for the TLS configuration. + displayName: Cert Path + path: overrides.alertmanager.client.tls.certPath + - description: The client-side key file path for the TLS configuration. + displayName: Key Path + path: overrides.alertmanager.client.tls.keyPath + - description: The server name to validate in the alertmanager server certificates. + displayName: Server Name + path: overrides.alertmanager.client.tls.serverName + - description: Defines the configuration for DNS-based discovery of AlertManager + hosts. + displayName: DNS Discovery + path: overrides.alertmanager.discovery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Use DNS SRV records to discover Alertmanager hosts. + displayName: Enable SRV + path: overrides.alertmanager.discovery.enableSRV + - description: How long to wait between refreshing DNS resolutions of Alertmanager + hosts. + displayName: Refresh Interval + path: overrides.alertmanager.discovery.refreshInterval + - description: If enabled, then requests to Alertmanager use the v2 API. + displayName: Enable AlertManager V2 API + path: overrides.alertmanager.enableV2 + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: List of AlertManager URLs to send notifications to. Each Alertmanager + URL is treated as a separate group in the configuration. Multiple Alertmanagers + in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery). + displayName: AlertManager Endpoints + path: overrides.alertmanager.endpoints + - description: Additional labels to add to all alerts. + displayName: Extra Alert Labels + path: overrides.alertmanager.externalLabels + - description: URL for alerts return path. + displayName: Alert External URL + path: overrides.alertmanager.externalUrl + - description: Defines the configuration for the notification queue to AlertManager + hosts. + displayName: Notification Queue + path: overrides.alertmanager.notificationQueue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Capacity of the queue for notifications to be sent to the Alertmanager. + displayName: Notification Queue Capacity + path: overrides.alertmanager.notificationQueue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum duration between alert and restored "for" state. This + is maintained only for alerts with configured "for" time greater than the + grace period. + displayName: Firing Grace Period + path: overrides.alertmanager.notificationQueue.forGracePeriod + - description: Max time to tolerate outage for restoring "for" state of alert. + displayName: Outage Tolerance + path: overrides.alertmanager.notificationQueue.forOutageTolerance + - description: Minimum amount of time to wait before resending an alert to Alertmanager. 
+ displayName: Resend Delay + path: overrides.alertmanager.notificationQueue.resendDelay + - description: HTTP timeout duration when sending notifications to the Alertmanager. + displayName: Timeout + path: overrides.alertmanager.notificationQueue.timeout + - description: List of alert relabel configurations. + displayName: Alert Relabel Configuration + path: overrides.alertmanager.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: overrides.alertmanager.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values. + displayName: Modulus + path: overrides.alertmanager.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. + Default is '(.*)' + displayName: Regex + path: overrides.alertmanager.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: overrides.alertmanager.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. + displayName: Separator + path: overrides.alertmanager.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: overrides.alertmanager.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: overrides.alertmanager.relabelConfigs[0].targetLabel + - description: Interval on how frequently to poll for new rule definitions. + displayName: Poll Interval + path: pollInterval + - description: Defines a remote write endpoint to write recording rule metrics. + displayName: Remote Write Configuration + path: remoteWrite + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Defines the configuration for remote write client. + displayName: Client + path: remoteWrite.client + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Type of authorization to use to access the remote write endpoint + displayName: Authorization Type + path: remoteWrite.client.authorization + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:basic + - urn:alm:descriptor:com.tectonic.ui:select:header + - description: Name of a secret in the namespace configured for authorization + secrets. + displayName: Authorization Secret Name + path: remoteWrite.client.authorizationSecretName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Configure whether HTTP requests follow HTTP 3xx redirects.
+ displayName: Follow HTTP Redirects + path: remoteWrite.client.followRedirects + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Name of the remote write config, which if specified must be unique + among remote write configs. + displayName: Name + path: remoteWrite.client.name + - description: Optional proxy URL. + displayName: HTTP Proxy URL + path: remoteWrite.client.proxyUrl + - description: List of remote write relabel configurations. + displayName: Metric Relabel Configuration + path: remoteWrite.client.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: remoteWrite.client.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values. + displayName: Modulus + path: remoteWrite.client.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. + Default is '(.*)' + displayName: Regex + path: remoteWrite.client.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: remoteWrite.client.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. + displayName: Separator + path: remoteWrite.client.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: remoteWrite.client.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: remoteWrite.client.relabelConfigs[0].targetLabel + - description: Timeout for requests to the remote write endpoint. + displayName: Remote Write Timeout + path: remoteWrite.client.timeout + - description: The URL of the endpoint to send samples to. + displayName: Endpoint + path: remoteWrite.client.url + - description: Enable remote-write functionality. + displayName: Enabled + path: remoteWrite.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Defines the configuration for remote write client queue. + displayName: Client Queue + path: remoteWrite.queue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Maximum time a sample will wait in buffer. + displayName: Batch Send Deadline + path: remoteWrite.queue.batchSendDeadline + - description: Number of samples to buffer per shard before we block reading + of more + displayName: Queue Capacity + path: remoteWrite.queue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Maximum retry delay. 
+ displayName: Max BackOff Period + path: remoteWrite.queue.maxBackOffPeriod + - description: Maximum number of samples per send. + displayName: Maximum Samples per Send + path: remoteWrite.queue.maxSamplesPerSend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Maximum number of shards, i.e. amount of concurrency. + displayName: Maximum Shards + path: remoteWrite.queue.maxShards + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Initial retry delay. Gets doubled for every retry. + displayName: Min BackOff Period + path: remoteWrite.queue.minBackOffPeriod + - description: Minimum number of shards, i.e. amount of concurrency. + displayName: Minimum Shards + path: remoteWrite.queue.minShards + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum period to wait between refreshing remote-write reconfigurations. + displayName: Min Refresh Period + path: remoteWrite.refreshPeriod + statusDescriptors: + - description: Conditions of the RulerConfig health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + description: |- + The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. + The purpose of this project is to simplify and automate the configuration of a Loki based logging stack for Kubernetes clusters. + + ### Operator features + + The Loki operator includes, but is not limited to, the following features: + + * Kubernetes Custom Resources: Use Kubernetes custom resources to deploy and manage Loki, Alerting rules, Recording rules, and related components. + * Simplified Deployment Configuration: Configure the fundamentals of Loki like tenants, limits, replication factor and storage from a native Kubernetes resource. + + ### Before you start + + 1. Ensure that [cert-manager](https://operatorhub.io/operator/cert-manager) is installed first. + 2. Ensure that the appropriate object storage solution that will be used by Loki is available and configured. + displayName: Community Loki Operator + icon: + - base64data: <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   version="1.1"
   id="svg48"
   width="500"
   height="500"
   viewBox="0 0 500 500"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <defs
     id="defs52" />
  <g
     id="g54">
    <image
       width="500"
       height="500"
       preserveAspectRatio="none"
       xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfQAAAH0CAYAAADL1t+KAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ
bWFnZVJlYWR5ccllPAAATZFJREFUeNrsvVmXJFd5r5/h5Zseqiu/QUU3g4az7Cqw4epAhzi2bwy2
7AUcw8EgbDFPEkICgaAFCCSQrAEEEkhGAmyMDQZh4ZvDoCjwjZmUCeuo1S2plf0Naq7LOhE1dOUQ
w9773W8Mmc+zVqLz/7s6MiIjcz/7jfjFu4MOTDUrb/wfYfKf7yevr3e/9f/u5RMBAABot9iX+BQA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMCAgI+gGaxee2X6nHj6Wtz/7/L8w0/dyicDAAAm
/D4fQSPkHTHNAgAAhD6d8qazGwAAGEMtqCvyJ4Wf9sn5h54a8GkCAEAZv8dHoMP8w0/1dgU+/LKH
Kh0AABB6A+gJ/z1CBwAAhN5qoe9V9af5CAEAwARCcbr0jeVNhQ4AAFToLarQg4xXPt3Vd1wZ8jEC
AABCr5H5h56KrYNxk39PlQ4AAAi9kVW6XbWO0AEAAKE3Ruh2l9qHIRgHAAClEIrTJjAMxuVDhQ4A
AFTojanQ3SEYBwAACL1u5r/yVOxY2Q+/qNIBAAChN75KL7+/jtABAAChN0robuE4gnEAAFAIobgq
IBgHAABU6FNWobvRXX0nwTgAAEDotTL/oCQYt3PwokoHAACE3vgqfVTee6/OzvBfIHQAAEDojRJ6
ubyzIBgHAAC5EIqrimCHYBwAAFChT1WF7kZ39V1XhHyMAACA0Gtk/oGzsXt136FjHAAAFMIl9+qr
9KVSeeeT/tvHfOzI6rVXRvvbW0xe/fmHn7qX0wMAgNDBReiB9b93CsaNyXspYx/SqwcIHQAAoYMR
th3jHHq6G8g7i4iTAwDQdsVAZay++4pUnE8Iz8TJ+S+dHYxJPEz+831h9f+S+Yee6nGWAADaCaG4
Cpn/8n4wzm2Bltwqff7hpwa7gTm37RlX/wAAgNDhsHKWVsF54o2F213k5AAAIHQwRyr00962O3qV
gAodAKDFEIqrHvuOcYFRhd632EYWEacGAIAKHXxV6OX317ur783sGNez2Ebm+66+40qqdAAAhA4m
zH9pqGOcezhuMhiXJtQt5J3zvggdAAChgzFpME72wKB5MM5u0kAwDgAAoYMFOsG4g4mC++NrVOgA
AC2FUFw91BOMKyfi1AAAUKGDrwrdJBj3vpJgnO1kYf+1+k6CcQAACB2MmL9fKRj3FYPWreXvh9AB
ABA6WFTF+sE4t8kCwTgAAIQOFugH4/xOFAAAoMEQiqsPtwBb+TPjBOMAAKjQoVEVevEl8+7q+z0E
43a3uzPyWn3XFVTpAAAIHUyY/+JQMK5c3nkyngzGPVgQjMuQd6ezk/WXCB0AAKGDRXUsbdeaH4wz
l3cWBOMAABA6WNCzkHcWOcG4nZ6FvKnQAQCmAEJx9dJXenRNFowLCMYBAFChg404pY+udVc/IAzG
5VwNWH03wTgAAIQORszfNxaMc5sUTAbjHjgrvZSfuV0AAEDokI+0ml7K+dvYKjU/+bcE4wAAEDqI
hS4NxhVNFMy2TYUOANAiCMXlsHrt7qpjS/uVavrfKHldNf/wU7Hnt9INxrlvO+JbAACA0KdB3p0M
Iab///0K3Ucw7rorwvl7zw48b7ez+p4rlua/dLbHNwQAAKG3Wd5ZeL+vnIg4ToQs3Ux6HCNCT0Wc
CFky0TjYLkIHAEDojZP5rcl/zhjKO0+cGvSstp29hvljGX8Zd0wunQfVTWAAAECH2QrFDSe/3QhX
33ZlV0no+bItD7CZB+PsesYTjAMAQOiNRHb5eE+AGpLrO8i2XLzBfuDOfnsHRPxEAAAQeuOYf+ip
lc7YvWaHyti/5A4WaXGnu3q9h6VUM1h9Lx3jAAAQeluqdLvK2H8w7h4PHeMyqvT5+x0T6qOfBUIH
AEDoDcT2MnRQmeB6lsdhul+x45UItQkMAAAgdB/EArmlhKvvaGkwzu0ePRU6AABCbyQ9gdw0Jecc
jAuC9LVTHoxzI+JnAgCA0BvH/FefWknkNhBuxr/kDINx+/IeeXU66avTXbvh8tCq8jdk9X3TG4xb
vfbKKHldl7weSV5Ppv9vhgUAaCOz2vo1lVzoIN0D/Afj7j4br37wigl578va5srByGRl/otne4mQ
XY9zeLut7xiXyrsz2iVwKeOY6YwHAAi9RaSXt6+2lNq44PwX6cFOT7ht+45xZpfiWxeMM5R3ZecW
AACh65AK7oyl1IYJV995ZXf+wd3n2n1fOZAIpSgYF1kfZ9A+ye2L/AnBuUXoANBKZnU99J6ge5rm
wN+XlfglHePK5J0fxotac2bTfITw3K6+7cqIoQEAEHoLmP+KRce4fHFoDPo9i/fPEnB37UMGwTiH
JP3qB9oRjJt/6Kn0vK44nE+a6QAAQm9tlW412O+Mvjo73u8rn/iHp2OH6rn0ysH8F8429YpEPeeW
ZjoAgNCnir6FvKsTXDB2O8DfPeBYuGdtktyy02SICh0AEHoriS3knUW4+q4rqu0YZ8ZpL9utruWt
7qTIrUpH6ACA0Nsz6KePiO1It9KeYFzRds2q2KhFZ1eeGXgHwTgAQOitYP6Bs+7BuEMxaAz68mDc
jQXBOEHL29XrWhKM++pTadJ9ZYYyAwAAM33J3UyexQL0H4y7SykYd99Z6ZrrbZOc/a2L0c+XYBwA
IPQW0XeWpvZSqk0LxrVPcsuCiRoVOgAg9FYRHATjOq4BqnD1PQrBuKDmYFz+8bazQmfZWABA6NPB
6rVXLiWvrpHg7Af/5gXjOhbBOLvjjRp2Xrv7q6V1MydFsnvondV3EowDgPYwdb3cU3l3RhflOBiU
r09e9w7/7fyXz64kFfag47Ly2qjkYrXq0rSiHqW7dtPl4YnPPz2Y2K7wPvrq9Vcszd9ztlfDee0O
nc+DcxvuH/9bk/99dOTcPvjUIBFyGny0u4IS7IxPjGKGCQBA6PXJ26b7l9tSquXbdebEnU/Hazde
birvoip9ROjz957tJUKW7p76Uqol8rY5B73CqwrBTuXnFgBg5oWeDPLh2AAfWUqu6DL01YJqWDMY
Z7/t0fvddkupmm13UencXmMob5tzu3zpWAOnngPcRweA1tCme+jp4PpIMshft/v8t30C3D75bXZ/
OVx9b03BuOL9cw/GVd9FLT2fZ5LX1ckrdLj3HeVU4L3DDoDO3zkAAITuXXDS+7/Zy2KKG650qgjG
+QvribYbBDuRyrmV/fvO6tuvXHKavJRMaFbffUXEMAEACN0jWstizn/p7MruGtrSClNrAuOe1O6u
fTijY5zFdhN5T7xS1m64fMnzuZV2x8ucwMw/cLb8O8Pz6ACA0GtBa1lMaSXnPxj3uZyOcXZMSu7u
7IR6nrwrvCIRO5zP4b8rP7d226NjHAAgdEXsl8U0WzGs/Lnvelbncr8UXXC8iaxjC3lnsahyrPbn
s7zpTZB8Z9y2R4UOAAhdjbJmIe6Dc+xUGR4Srr6vxqVU8/fXz1KqVVTogVrTGzrGAQBCbyDSwTlv
WUzp/WqtgV8qOfcrEsUTh6i2Cr343C5ZTwJNtvsegnEAgNC9MrIspsfqcv5+wVKq5RWinysSblLq
rn2kYClVt6p/l7UPeQ7GffUp6e2FvNBjel69hykBABC6zyrdbYD2E4ybFJ7/YNwdOsG4E//wdE9Q
9WtekYgdPvdh3M6te5gSAAChC1gWDvr2l6HNhNeqYNzISnNuLKofq79n75cF29Q8twAAVOgKg35s
vb3RvwtXP0AwTkQajJNlGaKc7fYamI8AAJhxocsbrmQvi1nWia6+Kr0ZwbjJ94waczXioL1r8lp9
1xV+OsaNHe/q+wjGAQBC90q6LGbHJeR0aZDeHfgnw1NfGArGubeB9T/o+wjG3WwZjDM8/rUbPQfj
HjQIxg3JO6dPe1bosfg7U+8tFQCAGa3QTSuuIGPw71wa/BcL5emO/2Dc7UrBuLuebmoP+9hC3jbn
QHq8BOMAAKErsGwhbxsR9Z33SPfxJr1gnIzT/q9IKK2QdtAxzuOkCAAAofsc9MvlbTM4mwkuv8oL
V6+boWBcUFFmwO64I2/HyrPoAIDQ1ZEvt5m9LGbPQpJVVnJawbhlm88s4/2WKj+3BsednNslle2+
n2AcACB0rxgti1k+UE+Gp+7bX0pV49Ep2QTELRh3+PfdtY9ZBOMsJgtrN10eeT630tXRsjvGfXHo
O+NxuwAACF2jkrN7htzvUqqH+A/GfdYgGOewMM2JO58+bKVrOlmoQnIHTW/cJxv2oUfJdgEAELpo
0F92bABTdrlYeg9XPxjnt9uZ9PE1vY5x7on04lsM/rcLAIDQvVbodoO1WzCufPvh6vUKwTh5t7PT
rZHcQcc4dyLrCh2hAwBCr1Hosmeo85bFFC/R2uk0JP1tsk/NlJx0oZzO6nuVOsZdRzAOABC6V+a/
7Lgs5uhgPRmeuncoGOe7QqxScpNC6q7d4riUasn21z7iORj3pbPiLnad7NBj8XeGjnEAgNBrrNIF
VVynoxCM01pK9TNKwbjPPW0+MbJ4YkDM8Gpwbldg6BgHAAi9RSz7ruL2Mbu8XaXgxica2sE4u+3r
L6VqP6miYxwAIPRWVeg6vchjieCCoBOu3XB5A4JxO+Ov04WSc3/+vr7MQP55ibxNFHgWHQAQujKB
+JnxNDwVGU0U8uWdvHZGXvutaCuW3E7GS1Ch1y90cde+1fcpdYy7nmAcACB0r5Qui2k2YE+Gp+4Z
WkrVTN5ZaAz6PQt5Zx13d+3jl4XehD70Oa7d7DkYd/9Z6bKxmRON+XsdOsZVM4EBAJjhCr1MRrLG
KD0Ledts15kTt6XBOGN55x33ZDDuDoNgnEPgzgOxcKLRmmVyAQAQulJjlETgOs99+57A+Fs8Rhq4
qycYZzl5GfnOOFyJ4D46ACB0LfQao0irw3DtQ8rBOLfjLg/GNWcC03eavEiWUuVZdABA6LUhvv+b
syymtDqsRnL+xNu8YJzJ5KXkHKx+oCAYZ/Xkwujtl7Ub/GYGAABmXugjy2J6FO+Jf3h6NBjndhla
Y9CXPp/dXfuEp2Dc2Oex9lHPwbgvnJXeXsg8t/P3nN1bZc5Q3nv5Cap0AEDo1VbpbmtdL5ZWiG74
D8Z9+lzsItvSYNztJcG4ui5FD3eM83sOehbyruTcAgAg9LL7v+6XxvvC/WpnMM52e2UTo6qvSBgE
2BJxL1vImwodABB6Yyt0s8E5Nt6D7PcN125sQDBuct+Kg3EeK38P9AX7kxJ5mSiQdAcAhF5JhS69
NJ6GpyLjQd9u0lCt5GSXxrUq/zona+mSp3Yd4wzfZ+1DBOMAAKF7pXRZTLPKa/K+8l1PHy6l6v6o
mMagL10xrLt2JiMYJ38krrP2Mc/BuPvOiidrnezQ4953RmctAAAAhO6lknOrvPwvpVq8XWdOfOpc
LBRctuQ+Y7GUarWSix0naeWhx4adWwAAhD7e/cvf5WL7YFw191q1nhuX3luuNhgnu8VAxzgAQOiN
Q3652C0YVy6UcO0mhWCcPP192lpy9XVR6ztO0g6IKp4oAAAg9Noq1iB3WUy3UJZ+JSdNf/t9dG2X
3QVs9DrGCVj9oJ+OceOs3UgwDgAQuldGlsW0q1SHB/DJ+8p3DgXj3OWpMehL09/dtVsLgnFm8u5k
LeG6dstlXo93/p6z0tsA2ef2rqcLO8YZvhdVOgAg9EqqdDvZlQfjGrIS2YlPnos9pL8nxHvitqxg
nMX663pXJGLHydTenwU7fkKPk+91mmEEABC6b4Y7xrnJbilnu/0GrkSWLzlzciS307OQd9bnrROM
M5d3VjtX+2Ac99EBAKE3qEK3mxD4XUr1kHDtI40MxplLru5mOkF2ZsCiF3tUeG7d1gDYWyb3wyrn
FgAAoTuI7dIAvvpBx6VUy7fdrraoGlc6hOdWsJDKLms3XL6U8Rn1BGsAUKUDAELXYHdZzKxgnN26
15Phqc+NLaXqtu1IdQLjmNRe+9Rl2ZITsvZxz8G4u8+Kn2LI7Ab4+f1zK+sYFzGUAABC901gXl3m
VHyLxlWr3aDvPxh36zm7tqim6e9Pn8ueGNm9R30d42zv7QfiWzV0jAMAhK7AsoW8s8gPxsmoNhhn
PvlwT3+7tdL1c0XC7cqEvBtgdbdTAABmXujSe61+g3HD4amblYJxOguMyFrpBkrBOJ2FcmJjgWej
FXoEAJhdoSfylt//vSGz+5e0YtWp5ORXDqKc7fYs5F1V1doTfE67r7UbL1fpGEeVDgAI3TOXlsV0
q6Lzw1N3jAXj3Ab/qFGS22f905eZrxducdxrZ/wG407c9XTP8XwWinc39BjkhB7Nv0MRwwkAIPQq
JWcmpMXSqtUN/8G4T5yzEnoQTL4yJfepc3ttUZv3+FrsOsEoOQfSRx4JxgEAQldgWVBFF4lIdnk7
qDYYlyNvHclVNIFRXFWvL7wiwSV3AEDoaoO+O/JgXLYIwrWP6gTjLORtc7zLjp9/favMle9TlPN/
j4X30LVCjwAAM12hS1ui5i2LKb6vrFHJBVrBOHkQsJ5gXMn5WLvp8iUv35nJ96VKBwCE7pPdZTGL
gnGOi26cuH1oKVX3BHjUiAnMGOu3GQTjHCrYtVs9B+PufNptn8pCj7cXdAM0/w5FDCkAgNC1JOd+
GdX+vrIkcCdg7uPndNYL/+RYMM4NjUf1YssFVGTn1nzSQDAOABC6wqC/rHQfvS+cKExfxzi3iZGf
KxJu56F8mVz7Vdc0zy0AABW6w0SgONB1UB26E659rMUd4+yvAOh2jHMjUpoUaZ1bAACEbiGerPBU
5LVi1ZacjMjpc6yjO56P0ONHcoJxskkRVToAIHTfnLhzLBhnM1AXhac+YxieKn6/qDFXJIYoDcbZ
93Pfk+enPAfjPve0dJKRKd4Tn7UMxmW/d8SwAgAI3X/V2rOQt82KYcL0947/YNwt58RVa+YE5tah
YJy7RPU6xlUZejQ7ZoJxAIDQFZCtGGYaniqWd8ar4mCc+bErTWAUO8a5/3uljnE7XHIHAISuWqH7
bRMaW8g7i3Dt45fpBONcq9biIOCy0ucoQbqGeZTzt5Yd4ybOd7h2y2UE4wAAoasIzm0ycBCeirK3
ayzv6h5xOrhy4Ca4fMnJ789XF4yzWQ3uZtuOcQbnXK/lLQDA7Ar9xOcMl1K1XW7ztoLwlLlQokok
Z1mtr3/2Mi9tUcf7yq9/2nMw7o6nVRLph+fWWN42EyMAAITuTXI+l9uUCcV/MO5j53pu+zMirknJ
feJc4cTIcGEY/aVU7a/A5JyDHenEiGAcACB072jd/5U/9119MG5S3lmVZ2EwTrCqm27HuHJ5Z8nY
vGNcM84tAAAVumBCIF9KNVsu4donFINxZvI2Pt5E3MuBTitdCX3nSrp4oZxY+J3ROrcAAAhdUsmt
3ezYMa6WLmo7fQt526wYpjUxkp1b977re+f2Y8KlVG2rfwAAhO7GiTtygnHCNcxPfPrcaDDOrYta
VNkExmL/1m9/sYrk1m/zHIy7/WlxCDBrAjNxbv1W/wAACF1QHfaE90QXS7frVsX5D8Z99LxK+nvu
4+dUJkYezm0szEiYhx5rPrcAAAjdZcWw0UHbvKtY3YLbIxb++/IJjM+JUR3ntnxVvaaeWwCAmRa6
9HLxklF1aC+4cO2MdjDO6wRmWbhf9ay85tJnvrnnFgAAoUsqrrWPeV5KVVdyOm1R5Z339IXu0LZ1
7ZbL/J9bOsYBAEL3z4nb94Nx9sunFlbpJz55zn65zcltR02cwKzfIQjGFbzP+mc8B+M+8/ThanBW
Pdd3CidVVueWjnEAgNArJBBLzn65TdkKZ87M3awUjLslIxhnn+6v4LK7ddtW83NLxzgAQOi1syyU
XHEwzn3b7Q7G2VeuCpLbWXbsuV58DugYBwAIvUUVunRwPghPuROu3drAYFzHMhhnPqHRrdDdJlZ+
uwEe7ku49kmCcQCA0OsX3JgY1m7xHIwre3RKRl9yrAX39h0u5xcv/uJlsub+HPouax8XBuPoGAcA
CL0adsNTRUupmklqMjx1q2F4qnj7UaUTGEMhr38uIxhXeqWj/P71+mdf7PV4T3y6eDU4w8/A/NzS
MQ4AEHpDqnSHR5v2X2bhKfug2GnfBzr3kfM9p30puXIw99FheVou/qJ7RUInnBjQMQ4AEHrzOFhK
1b26XCrcrntQbEnpeGPhlYOC9cJ3FLYrYtmhejaZZNAxDgAQemMrdJvq0mzQlz4S11371GWhetVq
X62bBeP8bVcyeem5rLZmMFmTd4z7FME4AEDonjmoLI3knTl4Z4WnTpw5F2s89+1Bcn3hwiVRoTzd
t6t3yV1wHtbO+O8Yl64hH9AxDgAQul9O3DZ0/9f/c+Naj4n5q9AdWP+8cCnVvO3e7jkY96lzex3j
ZBOgyWDcJ8w7xu3Le+RVODECAEDookG7J3xu3L5jnJkA/Afjbjqv8kjd3M3nDydG9turJxhXvj9W
57ZA3jbfGQAAhC5AZ7nNkue+DQRQX8c4twBb41redpSa3iTnqm8hb+PtAgAgdO0qzmW5zaHtWlZv
B3TXP93CYFyzWt72lNr7xs57tLcP4fptBOMAAKE3R3AHHeMywlMnPnEudqzedCWnHYxr1rGKlzxd
+6QgGOc2EQQAQOgu7HYVM1luU1ilO1ZyzQzG3akUjPuc52DcrRYd4yxatc59PCMYZ38lIGK4AQCE
3qQqfQ9ZMC7//fwH4248L65aM4NxH7EMxlXXUMd9ydOipVTlj+oRjAMAhO4dm85u2bStq1gsnGjY
TWDka8z7O7f2zWacQo8ukyIAAISuUcX5GJxN+37n012/rYJgnK+JRuCwxnwdj675mVRJJ0Xh+mcI
xgEAQq9H6AUiWLt1Mjw1d8u52MO+6Qfj7CcakdPnWC5S3aVUHa/ArH3KMBhn3zeeKh0AELpPTnwy
Izzl73Euu+q/aQ1Xcli/qyQY5/i42PrnPQfjPuGwlOrBIQT5rVqTyVoapBwILuUXTYwAABC6t0rO
Hq2GK/6DcR9SCsZ9+PzhEwNNuiJhMIEp6RWgsExu4fK7AAAIXcCy6F+XdYxzv7c8HcE4889RvWOc
Q6OfvMxA30LenYyV/LjkDgAIvY4qrkRyxcE4N2mmdNc/26JgnM3EqKJH1xJh9wRtWov2KbaQd9ax
h+u3v5hgHAAg9EqEbiG5rPDU3MfGgnHW95Z3paBRydkH40b/PlL6HJuRGRjb1/Xb8oJxxvKmYxwA
IPQq2O0qNtwxzq1zXPnja+Xy7lRyaTYQJ9I763fnBOOEa8Gv3+k3GDf38ZJgnNn+TmYGPkrHOABA
6NNapVvcVza8NLv3/v6DcTecFyfSMyV3k8NSqlVW6e7HSsc4AEDorcGlMYqRiHb6FvKuqiVquu24
KWvBjx27Xsc4/5MMOsYBAEJvTYUuH/SllX93/fYXhy063vJgXPX3lbX6AcSO2zsgXL+DYBwAIPRa
B/2sx5/WP50RjPvo+Vh6X1lJcm7VpW0wzl+THp1za7h/65/13DFO93gBAKHPLkVdxSyeXfa/lGqd
VWuJnNbvyQjGye8rp53oIp8HOnfL/rm1b89aeOtj7ubzZh3jiokYegAAoStIzqHxyDD+7iuPCsV/
MO6D56WVdLbkPtTQYFwgvfWx057MAAAg9Fn/AIJA2DHONTxVx8Ile+8bSyppxQmMTsc4+/asQy9B
MI5n0QEAoVdfoQtFVH7J3a0S7q7fMUXBuLpWXjOW907WvzcLxtmc34OOcZ8nGAcACL1awZk0XMno
KjZ383lpJaxVyQmCcTvpK/I+gVFfZc5I3pn7vH575r39nqAZEVU6ACB0DUa6irkn08sl53YVoMZg
3M7oa0iC6/e+qDgY5zhBWr/bczDuo+m53VlxmaQViXfuI+fpGAcACL2R1N1wJV8G/oNx12cF4/Ll
bSy5GzKCcU14nCvIaE3r49zSMQ4AEHojsb8MbXYfve+7OvQjuZ3YQt5tl5x76NH23Np9b7jkDgAI
XYHYsYI+IFz/zGVdcXU4+T7d9c9PWce4qit0+VUS945xxd+bcP1OgnEAgND1Bn37xHJuxTX34fOx
YJKgKTlpP/LI20TB7EqHX6Fb3gpYvyMnGCfYpuK5BQCEPrvM3XLusPuXu4DzJaeyAEytVWtn/b4X
LVlv1+SJgXs8B+NuPj+5TK6HKwdzN411jHMj4tcHAAi9KVX64aNQ+feVZfgPxl33jEo/8rkPDgXj
PC7RWtkEJv8zoWMcACD01hB0+vadxHZMRNTM8NRwx7jAan+KZXSQG3CnnmCc2zPjdIwDAITeQGIL
eWcN2uH67ZkhJ+mCKN31OysOxplV1zrBuGBHv0K3X7DFf8e4PcL1uwjGAQBCVxj0jeVtXHHN3XRe
OuhrVXJ9wf6kRNYThczPcvwZ+AqeRXeYxKx/XtAxrvpzCwAIfXaZ++g5ve5fpl3Uqhz05ZfGO+tf
sAjGWTSwWb/3RZHXc/vhsaY3vlaZuzGjY5zVZKZT9MQAAABCV63kivEbnjrcL//BuPc/0/OwmUnJ
Xb8vT/vuc9VMYOpYZa54AkEwDgAQugLS57PlwbjswV/rsmwsqCwLgnE70slCMzrGuXQD9Hd/HgAA
oasLLn/QDtfvsAjGmV/67a7fVUPHOPclT7XarTbvWE2eFijrGHc3wTgAQOj6g76P57NvPB8Lm8to
Vel9wXGmfxI5ybP8PXWF7ngu1u+kYxwAIPRWMHfzWPcvt/utMslVOehb3FfO89TGFw2DcbbtVr/g
ORh301DHOI/nYO4GOsYBAEJvT5VuJ0l/wbhRAfoPxr0vOxhn6d5JyV33jF271epyAz3B50/HOABA
6C1DK8DWdyqBNSv0vbeJra8QSyRnPluoNhgna6ZDxzgAQOgNJBaKKFz/fEkwzvY+68FSqne3KBgX
JPJs6qI0/vvMx5JLHLvfmXsIxgEAQvc/6LsvoZpbpc99aCgY16xKThSM67QxMyBg/R/oGAcACL0V
zH2koPuXufDMJOchQe9NcgLRbdzvsJSqwaRm/Yueg3EfGusYZ7gfpZO1Dzp2jBvtnhfx6wMAhF53
lT7JYqk83QTqPRh3/L1KHePe/4yZPKu/IiG7t+8ajCtvfUswDgAQukLV2hduwe9SqrqCS4kllbSx
5CwnSYGG5Gzu7buGHi361ldwbgEAoc80doKbHPjD9Tsdl1It3m53/Z4agnGulfS4PO0337wKPfdY
d2LHvvUHhOv3vohgHAAg9MoEJxj45z54PtbYrgf6jvtyQGTzOVpsXkfowq5963cbdIxzm7RRpQMA
QvfJ3E37ISfZwB8pCaWe9cJLjLzx5exgnPWhjm/3fs/BuBssg3HZ+5fVTKc8GFf+YUT8+gAAoWtI
Tsai0nb9B+Pe84zOFYn3lQTjZM1cZFW6ffU8vH/lmQG3+/QE4wAAoStgH2BzWW7Tatu792j1gnEa
yX55M5dqOsbZ7V9eZqAvWHVNa/ICAAh95okdq7cDwvW7HINxxSnp7vq9LwobeEWiOBjne7vSY9VZ
+S52nCAcfme+QDAOABC6b8TLbWYN/HPXjwXjmvOIk/SRuqj0c2yK0EUBtr1zlEyqosKJQrOOFwAQ
+uwyd+PQspi+JRfs9ISPODVxJbLOxgMeOsZlbfdLnoNx1xsE48onWlnNdAQd4y69In59AIDQm1DJ
aS63efge/oNx79YJxh1/b0Ewrt5H9XoW8s5C0DGu8JgJxgEAQldAa8lTrSVapegF49xWmNNLfwc7
y0pXSawXuxn7Ey65AwBCVxGcfTU52jHubstgnNl7dNe/0LJgnM7EyE+F7ja5yjvW2ELeWX8WbtxP
MA4AELp/wbktn1ooo7nrnokFkwTNKl0/GOd2BaB6oRvs5/p9mff2e06HWs0VGABA6LPJ3A1jy2L6
XC88ELeXbWLV2tl4MCMY52OJ1i97DsZdN3Rv332xlqxmOivJ/2kQWHxmdIwDAITelCq9GK2GK/6D
ce96Rly1Zk00jr/bcCnV4vfWex7d/Vj9dYwz2y4AAEIX4H4Zuvhea7+hzyvHwomGPNmf/d56HeM8
9hkYObdB484tACD0mSYWSCglXL/Hw1Kqk+/VXf+iUjBOZ6KxLKiEdSSnd6yx4zm9FKbc+BLBOABA
6L7pCSSUO/DPvf8Z6URBq5LTC8Y1bZU5D01vkklVZL3d+jISAIDQZ5e5Dw51jHO/jBoZCcVSeEFT
g3FfyQnGCdl4wHMw7v3ypjdBdjOdwzCl+5MMEb8+AEDoTZDcyCC9k7+Uqrm8s7zgPxj3TqVg3LuE
wbjiFexk59atAczBnxWfW/fPkWAcACB0BcwuQ+e3ELXqGGdR1DUvGFc0gdFbh9ydnKY3xoce+OsY
V9G5BQCEPtPEFvLOIly/dzLklGyi59x8ZD8Yt3F/jcE4+wmMzjrkwgrdQt7mDX5KOsYZEG48QDAO
ABC690HfYfGOUhkdf19BMK7ehUv6wglMVDhR8L8OuWDuknPVwGKikUyqIuerEUXvSZUOAAjdL3PX
n3dbFnN0sI7yJwud5qW/hROYja++UL6UatZ2H/QbjEsmVel5XfE90Tj+7pKlVM3OecSvDwAQuobk
XCutosYo8vS3/2DcO57pedjMpOTe6RiMM1kURTbZkvYE0OoGSDAOABC6An1BpVVUSet0opMTCyXZ
pvXClwUTtfxz29xugACA0GeYwHKt8Mm/Dde/8CL3jnH5793d+JJCME5+RaI8GGe9Rvru5f9qnr23
W7BFr2PcgwTjAAChVyc4QZV+/L0ZwTj7iUN1HePM9y3K+fc9C3mP3cdXqlpNw3oFx56zGhwd4wAA
oTeNuQ/sh5xkAbZyybk1I6m14UoeGw8ZBuPy5Z293a++MPJ5oMffk3Fv30N73+PvcuwYZ/KdAQBA
6MJKTob5fWU7AfgPxr39WZ1g3DsSeQY7K6byNt2u9yrd17mVLwBDMA4AELoCdgG2SSnnB+Ma9nz2
PrFQkvKlVKuT3LLjMZafW/dtaoYeAQChzzSxhbyzCDe+mBFyklf+3Y0HWhqMa47keo7HWDapigXb
3PvOfPWFBOMAAKGrDPoWl8Mz/iyrCYl9JVxNVzGdYJw8LKYvdId73jmrwfWstznelY9gHAAgdL/M
vf+ZvaVURUW6g+TqS0PLg3EPlwTjHLe/8ZDnYNy703v7Qx3j3MhqpnO4/K6JvLO78kX8+gAAoStV
coJFVbS6ivkPxr1NKRj39mcP5elxu96rdPsrJQbn1qmlLsE4AEDovknG5L7limhmImpuV7FY+O91
gnFBDcE453O703dc0Ef73AIAQp9pYosBPotw435Bx7h82XQ3Hqw5GJf9mfgJxlWTGZDeCtDrGPcQ
wTgAQOi+S/Se8D5r5sB//F0WwbhqG8z0BRVrSmQ9UagrM+Dh3OasBkfHOABA6E3j+HufcV9K9XDA
NpecXaWoV7W6VKz7f7fxtYJgnCB0t/Gw52Ccy2pwBlckjr/9WTrGAQBCbyjSZ5YXSytEt0rRfzDu
2md7NvI2fUb7+Nv2g3Eyqg3Gmcg4oGMcACD0NtF3qqI1uorpCy4lFlw1KJKR1jrkEpbtzulEat3P
uTXPIgAAIHSBTGLJylydNBj3ZU/BuNH36G58RSEYJ+9zLgvGVZkZKKqkzRaRsQ/GmXYZfJhgHAAg
dN+Iu4p1spuQxI5i067SpVcOIuMKvTGZAbsV4IbJWQ1OnBnoEIwDAITul+PvGer+5X5ftFhy1mui
XxJPMxqujO3fxiMZwTjbzEDGZ7LxNc/BuIPV4ATHmhmMe9uzhV0Ghd8ZAACEXovkiu7/loWnyruN
+Q/G/d2z0kVasq9I/P2zqeBWBFcjdKt092P1kxmY3D7BOABA6AqYXYa2v//bt5B3VYJLiQXSLZac
7DJ0NR3j/NwKkD7TzyV3AEDoDRRcuPFA1lKqOz3LPt/jdDe++sLQ+9HKH6nLa3m7LNyzahal8bNP
sdX3ZPLvwo2vEYwDAITetEE/c+A//vZnY6F4tR5xqi4YV/exBuJ9ym56U5ig79R5iwEAEPrscvzd
go5xh4O4THLVtoCVB+MeLVlK1e1Y08Bd5PXcvq3g3r65hLMyA3vfGdkthohfHwAg9Dollz2Im4en
7CTgPxj3VqVg3N9lyNNeePoTGF/NdIKOdElagnEAgNAV6DtUbiYi6ivdw5USCycaZi1vmyC59N6+
TjMdOsYBAEJvHOMd4+wH/3DjwaxgnLiK62483KJgXKehwTi3z6hMvLHDlY3R78wjBOMAAKE3Y9Af
HcSzmpDEQploSa5ZwTjdqlW6AE9n4x/pGAcACL0VHH+nZTAuexB3k1y5EJoZjPu6YzCuTJ6Peg7G
XTt2b9/tUbOszAAd4wAAobeqSjevwMqDcW7VnP9g3DVKwbi3OspTewIzfIvB7RzTMQ4AEHqL6Nv1
XZ9Y9COv4Upf2Cu+bcG4WHi8pxWOdVlpkkHHOABA6I0jKAo5Ga3YFW589YV+llId3a/uxj8qBONc
G+q0cS34sgrdfZ9iwSRh7zvzKME4AEDo/gVnLm/jgf/4tRbBuGrXC+8L9iclMq787d4j3Pi6d8lJ
16fPbnpTtgCP2ftQpQMAQvfJ8XekS6nuDISbiYyFYresahMbrnQ2vmEZjKupLWpm0xv7/ZqcrF3z
rGaYEgAQOlRWyZmGnGye+84e9P0H497yrLQJTLHkbCcIo38bqZ1b98vjdsE4eZgSABA6CNAKOWl1
opMSC/+9W1tU91S5Owcd49wx7wZoN2HjkjsAIHQ1wUlCTg/lBONkDUi6G19TCsa5ydE8GNeUR9fk
V1/ynmKIhZPAcOMbBOMAAKH7ruLkIafs1bliD3tXT8c4l2Cc/NE1DcmJ7+3nNL2hYxwAIPSmcfxt
z7otpWp2/7f5bVEdpLT5zRcseTvW0X3xG4w7aHrjWbzH3+KhY1xAMA4AELoGWvd/pW1R/Qfj3mwX
jNv7s52RV6bk3tzQ9Hcg7gkg7xhnt/wuACB0EKB1/7cvrIYrDcZlyzvzmfw2pb/NV4PL3k+V0GPA
JXcAQOgK+Lj/+/ALs5dSlT0m1t14RCcYZyHvLGH5SX9f2uSlfaguGGe+YEt+xziLY834rMPNb76A
YBwAIPRKBn27qi5r4ZLYw2TDu+QSofQt5J0lrCh3YmQu7+FL+JcmRgqS6zmutnbp7za+kdMxzu0Y
Vc8tACD0meb4tYb3f93aokrvtVZTtVpW1pv/VB6MMxabWfXvdm6vyekYZ3e8k5O1v937zljIm45x
AIDQa6nS7as6rfvK3oNxx/72uZ4g9V0ouURoAwt5m1f/sksS0uNdzKnE6RgHAAi9cQRj93/tB2qv
95UNtislFv578wmMjyVaZSwLvhfeMwMVnFsAQOgzTSyQUEq48bWcYJyM7sajDeoYVya5IENywsq/
smO1XfkuEE+Kws1/JhgHAAjd/6Cvs3CJ26Cv3/u777AfJpfG5ZL7pxfod4wzfrxs7/bB5jdfEIkn
RdVlJAAAoc8ux//esWPc6GAdOVWI5XJpZjDunz12jFOcwBx/87PpSnArpvLOCbhN7NOxNz2X/50x
z2BE/PoAAKFXITm7wbr8vrLbEqP+g3FvcgjGGSxeUig5888g0j63Dun0/HMreyyOYBwAIHQF+sKq
tfy+spvoqg/GmR231gTGu+QSYS87PlpWfm4lFTqX3AEAoSsQZHT/siPceCRnKVXZRKG78Q3FYJx7
UrtNExjpojRLxt8Zu+9NuPktgnEACB10B323SUHW6lyxxnPfHva1L1w6NjKu/O2k519y8ja8aTOd
qKrvDAAgdBBw/K2qK4ZJB/76Hucqkty3coJxDZvAHHvTc+l5XXGQbXFm4I3PNXOVOQBA6KC2Ypj0
ESf/wbg3PqeSSD/2f56TPTHQUZNctcvk0jEOABB6jQRq3b/6ApFoVegpcaWSM38v3Y5xbsn0pYq/
MwCA0EFNcOWEG18vCMZZLbk58nhVd/ObLwhVq1bNteDtJwoamYGeoLVv0T7Fwv0KN79NMA4AoYOe
4OyrytzuX8ffvB+MM5N33vrkesE493BcVCo5t4lCuPkvih3jHCvqzW8Jg3HVP5oIAAh9Njl+zbPy
7l8lwThDeWe9bzODcf+SIbmyatgMv8G4//PcYcc49wnQZGbgDTmZAbtJQ8SvDwChg4bkbAbjyb/N
XW7TQt5Z7+s/GPeG3WCc//T3G1oajCs/F+Yd4+w4zc8OAKGDbwK1AFtfUMF5r1iNJVdVMM5wYiRk
WXBe889BIFiiNafyBwCEDnJiR+EeEG58I2cpVdmz2d3Nf1IJxtmnv10mMP4mRrLJi04iXVr5dzf/
VeXcAgBCn+kKvSe895s58B9703Oxh32rPv3tLjnxxMh7+lu6Pn2y78k+RRPn9m9yzq3d43FU6QAI
HXxy/M2OS6mODtyRUiXXzGDctwuCcZ4nRhKOvcGiY5x9Ir0nWHVN69wCAEKfeZraVcx/MO5vHNui
ju73UsZ2NdeY93tu7a4iLBpX/3bbJRgHgNBBgb7toDz2OFr+fWW3JUW1L8uWV+nF+y0LxtmvMe9O
GmCTJdLNz22NVyMAAKHD3qAfW8g761nycPObL+gaVXF2FV1381vKwTj7yrJYcrKqtVm3GIpbwErv
z3c3v0MwDgChg9qgb9UIpnzhklggTT3J2Sbwg45pn/NYcDViry3qvyl2jHOcXG3+a0Yw7vUWoUfu
owMAQq+G43/77Eoi7oGFvLNkFRXK051qq1bDiUci3khju76PNzMzYL9Yi/nja/VfkQAAhE6VLqym
/a1ENrof/oNx/3tfcp6f0U62uxeMk203quWKhGtmgI5xAIDQG4Z84RK3lcjKhbKkdLzSx9cWK92u
DJ3MQGB4bvPPARU6AEIHBWJhJR1u/vMLukYVup1Qupv/UkEwruoJTP7nWH9mwPxY5R3jvkswDgCh
g2/E4amsgf/YG5+LhZdltap0ae912Xrh+Z9juPmdmoJxBfuY7FM0cW5fJ+wYp3duAQChzy7H3pTR
GMV+FbbIq1DqErppMO47hsG4mhemOfZ6g2Y65fu4ZFT98zw6ACD0BmB6ada+MYq0GvYfjHu9UjDu
9c+l2xwIr0hEqhOYRoUedwjGASB0UECrMUoTVyLTDLA1Lxg33DHODQ+hx53R194jklToAAgdFAZ9
6f3ucPNbBcE492q4u/ntGQjG6ba87Qn2x+FZ9Ex5Z71Hd/N7p0J+fAAIHeoe9CcFMHkZ+g37wTgd
eTZRcrHldsYnN+HmdysOxhlMtpJ9iibO7WvTc2ssbzrGAQBCr4Jjb7RYMSx/cI68ThbqrFodJTex
XbcrE36Dca8bCsb5b8NLxzgAQOitqNJ9LLcpX+HMfzBOSXLJdkeDcW5E3s+svA3vYuF26RgHAAi9
QQRqy2L2RVVroBiM05BcpwUd40o/m53xV3ua6QAAQgdBx7g9ws1v5yyl6r4KWUp389+mOBg3GSSr
+Nn7nYwgm/GxSpsSdTe/TzAOAKFDdYO++SCdtXBJbDnI2wiluuOdlK99MM4sBR5u/vspnWCcmbwz
z83m905FE+f2ry/EwvOqdW4BAKHPLsfeUBCMMx+ko5x/38SuYgbBuGL5JuKN8uXZMX+Ea/Iz9huM
e+1zg+T9V4Tn1qxjXKcR5xYAEDpVurCDmtZ9Zf/BuNeOB+Ms5Hsos6WM7a4k/35gIW/zNean7dwq
dQMEAIQO0mUxfTZcUaxYD99jp2ch7ywZlkvOb7tVybldFv77di2TCwAIfcaJBdLdW0r1Xw2XUrWr
WLub36kwGCe/DK21xrysQnc/r0X7JF8m9zGCcQAIHaob9AWSO/b6oWCc/+YmsuP1vEiL08RoknDz
+6eq6RhncfzJPkUT5/avLjR1mVyAxrD1f09Gyeu65PVI8npy60cnr0booMqxv9kPxtkvnzr8inL+
Tvrct/9BX75IS2b6uyNvLev9eI/99YXBxGpw9ucjElf/2ceN0GGa5b2TfMefSF73JK9r9r/vM/md
/32+HhWzJ7kwd/Atp+i+8pLjPqX4D8YlkkuEnE5iuoLPa2m8Ik+2u5JsdyD8HCMPlX7WOQgF50B2
bvOPm2ActFbe+9/9xd3/mk9OZ/I7j9CrJw05XV3rcptVVeiHMooE+7ZYKE/3z3FR7dz6Pwd9x0mL
ytUIgIbJu8rxrNFwyb36Cl26Qlq4+R3DYJxteOq7LQzGNesHHwvPQbj52KnyboBBgbzzOsb9B8E4
aJa8DS6bS+gm25y57zwVevW4r442KqMReRx73XNxImTpvqXbHXg/Xh3xpsd/RjCx2pXnsasvrDTo
WLPP7V9eiDd/cMqtQtc9twBVV9624+RMfeep0Cvm2OsNllJ1b4wiDaE1MxiXkf7uyB8T8368x/7q
worzAGIaeiTpDrNdeduMkzP3nadCr69KDx0rrU7HVzBu971Hmr74D8b91YVBImRZMC6rak3kmWx3
0BkPodl9nlGnjmBcUMG5nXw/gnEwLZW3KTP3nUfodRAYhqfsq62+hbyrrOLMg3HZP97Fguo/FOyX
fjAusD7etoUeAXk3Qd585ztccq+L2PmLftAx7rsFwTj3hUu6m99TCU8tG7x30eVk/+uFa/3gD0KP
ecdSfrzh5n+c8hp63P8/dbceJxgHZvJu0GVzyba7Wz85OVPfeSr0eugZfdHLZTQyMTj22ufizX8/
5WO7A5Xj9f+oXnr8Zxx+6Ify/MGp7rG/9ByMk1cbk+f2Ly7Em4+fkn5tCMbBNFTeNtueqe88FXoN
HHvdWDDObZYalQrFbbunFX5s4vT35mOnIueJUYXBmd3JwXDHODfKz63N6dVehAeovKutvG06bc7U
d54KvS607v8GgvCUguB2JXf1hUEiZP/BuKsvrCTbHXRcA4bBJXnGClckQsv9KD23gUkwLqh4sgZU
3vVU3qZ/O1PfeYReH+7BuN174rk/0PR+9TWCH1CkdLz1BuMCy4mR1rk1G6DMMgNuz6ID8p5Wec/8
d55L7vVhVhWOh9uCS+G2cPPfT9kvpTr+o8i4dLX5/VMaP4LqgnF2l/v0OsbZhdaGX+HW4wYd4+zp
bv2QYFzb5T2Dl80l+9Hd+unJmfnOU6HXRy+n8raZoWYtXNLb/N4p6cx6qSNvUpN9vIHjjz+/BWx6
/GcE+7WbKj/2Fx6DcQXNdALzbUyc26OvvhAnQpZWPwTjqLynofK2Hc9m4juP0Gvi2GufW0kq7PRL
Fgq+5FEnu3943OmILp0vKvyoe9IfdCLeKE17O1+RKP7Bx97O7WsurGw+fmoQmN5HDyzOrf199MH+
v0mvkPSO/vmFmF8f8p5ieWdtO/0sH0PoUEWVHgq+5EVdxSRC9x+M+8sLg80fjAXj3O4Bj16RSCrr
RPRmE6P8H33U8RyMC/KCcYGXc7tUIu/+/vGkAl/hZ4a8Z0jeWcxMMA6h10kdHePMfhSR0vFKJxqL
xhMjux+9TjAuUDi3waVzi7yRN/KuqUBB6JBFOhDLVgz7/qnu/qIg44IT/Sg2Hzu1dOzqC77voy8r
XTmwb7c6+nHodIwTntut/zzVzZD0o+kLeTdS3ktj8o7U3gx529DdeuJkePSq5wcIHTTpefhyZy1c
0ktEL/1R6AXj3H/g4mBczsexmyo/+hrFjnHmOzVceU+AyJE38nba7kwE4xB6jRz76wsrm99TWjGs
icE4DxOYzcdPRcdeYxaMsxwjvAbjjr76wsrWD0vP7bC80/fmsjnyRt46252JYBxCb0KVHqisGOZ+
v7roMTHJBOYvLgw2/yOjY5z9I3WjVySSyjqpsCflaffjjzoaHeMOzy3yRt7Iu77tzkQwDqHXTd3B
uOJH4jSO13qiEZhPYELBj1/jUb37kv+9D3kjb+Rd5XZ3bMZJhA5eSau1M4Ivd7j52Klu2tc8Q3Ci
H0ZSTS8lVXWlwTiDXSwOxvlf0c0ZnvlG3si7Fnln0d1aDsOjpwcDhA6a9Dx8ubMWLult/uCU9Iek
GowL3H7wah3jtn54qpve++YribyRd6vlXTSeIXTQI33kbPP7pwbC++hRJ/v+b5xZDcubm0h+w+Jg
XCLeKG2DajwxMn8vr8E4QN7IuzZ55wl9qoNxCL0JaC2luhfKkgxw/i9Dv+bCYOtx/8G43FS52QAw
ULgSAcgbedcn76y/n/pgHEJvBnbBuMBQvIFDx7jJyl9rAiPZtnkwLl/epM2RN/KeXnlXUqAgdMgi
FcsZwZc6b8WwnvRHuvn4qaVjr2lJx7jJJwaQN/JG3rMp7yy6Wz8Lw6OvnN5gHEJvBj0PA0LWwiW9
RPQ+ZrXN6hiXP9N+dGiChLyRN/KubNuNk3fR2IHQQY/0kbPNHxTc/zX7QkedsmBc4PQ7qrdjXPZ+
D7b+89RSIuyR7ST/3+lneCvfKOSNvJF3znanOhiH0JtVpYeCL/RizhfZ6H51YF8NO3P01RcGWz/M
CMblyLvDZfO2Sfwe5I28GyDvLKY6GIfQm0KFHeMCu/2KlI43a6KBvFvO0T97vrf1o5ORx+8J8kbe
Prc71cE4hN4cUoGdEXyhw83HT3WPja0Y5um576WkqvZ9H/3ryWsFeU8lPaeBE3kjb/3tdrd+HoZH
XzGdwTiE3qxBUPqFnnw++zUXeluPn5L+OLwH4xJ5P9o5DLHBrAkdeSPvmj67IJjeYNzvMfY0g/1H
zgYjX+jxV8F3f/8V5fxBbLqtHBY5Q9NFeq87eV2TvCKFzffzvqCO3z+jL3/ptpuyH6rb3sl4edpn
rWNU/OwSeU+8OlN82Z0KvUkYdIwL3MQrbeSyxMlpt7w7+Wnzezu+290GSl33qLypvEvkncGgM5bL
OfI/B1N7aw+hN4uRYFxg/yORLaWav92IUzMV8q5ksnb0T56Pt358Enkjb+SN0Ge5QDcIxhX/SPJW
DLO9Pz/xw+DsTIW8s9CarJkH45A38kbeCH0K6Xn4kaQD9EjjhDShnojeWN6kzadW3tnb/tHJpaN/
+rxGN8Al5I28kTdCn0mOvsZixbB8sjsh7a0XHiLv2ZZ3zuCs0d63j7yRN/JG6LNO+iW+RvDjy+yE
lIj7Kj5a5J3DosJ79ZA38kbeCH3W6Tv8+IZ/GI/xETZW3mFn75ZI03qb+w/G/a/n462fnETeyBt5
I/SZpmch790fBpfNW8JeQ4tHGjg4R0rH69YxDnkjb0Do08DRV1+It/7zFPKelclaQwbnrR+fXDr6
JxUF45A38gaEPkNchbyncLL2p88Ptn50MnuVufrFphOMQ97IGxD6TA/8f34h5lOYWtJze3UDxaa/
7j3yRt6A0AG0yUmb/1VSVfsOGaaPc12tdiDuj4r5D8a96vl464mT9R4j8kbeCB1g5uSdRfYz/bLB
Oh0wz3jals+/jZQ+blkwDnkjb0DoAJbyzuK0kuDqlnf2Z/XTk0tJVV1fMM56v3d0Pg/kDQgdoNXy
zqvQvXL0T55f2frxyXSgDeuUd8HxVhOMQ97IGxA6gJK8s+gm8g0TCQ8UqtawZnlnbbvajnHIG3kD
QgfkrSy28ap14Pk9RpbJrUne1VyRiJ6Pt+KTyBt5A0KHGZR3uC+W051mtEf1H4wbXya3Oe1RtT7r
/fvoyBt5A0KHWRD5NckAek/HtOlKdYOzTjAuaNQxHp6HJ04uHb3KdzBuJz8Y1xR5K24beQNCh9li
b9DretyWLzQWLlnZ+mlJMK6+qlQvGIe8kTcgdJgJ9B/ncpxkbP3kZJhIeKBwvGFDjnGYeoJxyBsq
ZPu/w3B/8pq+5o+8fHA9QgfwVbX+6fO9rR+dbOqg7z8Y18kJxtUvNv9XJE4P4q3lEHlDE+R9ev+/
w1cD03OG0GG2GAqsLR39s+dvVXiLdFCMGjXoB5ck19yOcV7EdimwFintj7+lVJE3uMs7i+72LxbC
Iy+/OGj78SN0KJX37g8jGP1hJNV0nFTVscKgH3nepo+/bU7HOL/yzj73cbh0NBo0YylV5A1+5Z33
G9C4CofQoXnyzmFpf7DzSV/0r/WatPi/DH3V8ytbTzgE46rpsFbPUqr1yHtl/3uMvGdH3nm/8cfa
/jkhdORt8cMYQX+5zXrknUV366cnw6OvqjgYV1971OrObfXyTvdjef+/qbwHjAozJ++s797pafjs
EDrydkWjz3lv68cn65Z30fH6HvwPg3HN6m3u/9y+chBv/TxUOTHIG3l7+B0sTcPnidCRt8MIqvgD
CIaCcfXJO68FbE3BuMo7rEVKn6o4GIe8kbfSuNDd/uVCeORl7Q7GIXTk7fzjSKrpKKmqY4VBP6pZ
3llUFIxrRnvUrZ+FS0lVXWswDnkj74on9a0PxiH0dgg82pdcbfIu+AH4Fnq/oc8payxcsrIVh+kA
EtYp74LjrSwYh7yRd8Xyztpu64NxCL0NBJ23JP97TUPENkxDltusZADobj1xMjx6lXIwrjkLk6QD
86MaVySQN/JugLyzaH0wDqG3g+oe56q7an3V872tn55s0jGOz+AHnrddvpRqtceod25fMYi3/ytE
3si7CfLO+vvWB+MQejvo1f7jyBn0t35yspsuNuL5neJOUTCr3hawukup1n+MakLf5yTyRt4NkHcW
3e1fL4RH/qi9wTiE3gLSjmyZfc6bca9Z4z76YTCuUffTd9qzlKqH78jWz8Morap97g4yR94NkXfR
eIbQoYIqPVB7VEzyt5F3oQfCWwx+5F1J1Xr09GBl62cGwbh6BlCNyRog76bIO+8739pgHEJvC74W
t/DfpKXejnF68s6iu7UchomEBwrHG+oeo+FmA/VzC8i7KfLOotXBOITeHvqqP5AmPc511fO9rSdO
Ku23eOCqbinVauV9wHBgjeq8yfJOv4sB8va8L60OxiH09tBrgLyzCLd+erJ79FUVBOOql3ee0PWC
cfQ2B+Rdlbyz6G7/ZiE88tJ2BuMQektIO7Jd6nMeNO7H4f9eq/VSqkoD1+Tfnlb4HL02cEHeyBt5
d6RXHRE6KFN2H72+H0fUqXQp1crknfdj9ztZe8VgZevnbsE45I28kXdHowVsK4NxCL1dHAq9WT8O
xWBcrfLOorv1szA8+srqg3HIG3kj70qOsbXBOITergq939Afh0af895WHNYt76Lj9S3NkWAc8kbe
yLu2Y2xtMA6ht69Cb96PIw3GxSe76WIjnrcbd9LL+c3MDHi9JBdMLqWKvJF386Q5ffLOorv95EJ4
5CXtC8Yh9BZx9FXPx8Z9zqv/4VUfjKtvcNFaSvWTyBt5I+9GbLeVwTiE3s4qfamBP7yooxmMa9bg
4v2SXCLwtCK/la838kbejdjnVgbjEHobhd6EFrCTA1ezl1L1Owh0t34ehkdfQRWNvJH3FMg7i1YG
4xB62/AVjPPfpMV/MO6Vg97Wz8LGDQL7gbVWL+KAvJE38q52PEPokF2h1y/vrG2HW8thN11sxPPx
xh2TBjPVd1jr8lVE3sh7KuSd9bfd7f7C0pHFi702fecRess4etXzcWGf83rbo+oupVqfvEmbI2/k
Pb3yLhrPEDpUUqUvNaS3+fDfRp02LKWKvJE38kbe5bRutUGE3kaMllKtpcNa85ZSRd7IG3kjbzda
dx8dobeTfgPkXckP4OgrBr2tn4fIG3kjb+Rd9X5ECB0qYKfX0B9IuPWzsHv0ldUE45D3FEj8F+E1
yX/egryRdxP3Y/u3C0tH/rA9wTiE3kKORoN4os95c34gKsG4RN4R8p5KThdXQsgbedd6jK0KxiH0
tmJ0H72WH0jkW+jBXjAOeU8nfeQ9o/JuxzG2KhiH0NtLU1vAev8BJOJ+NPnPo5zyacTg9hHyRt71
HWOrgnG/x4DS4som2P8yB56/1EHGy5wlTs30sf3fYXf7F2HkfbL28ouxx++ev+91U/ajCdt12fb0
HGPUpt8pFXpb8dXn3P/sNtz+r7C7v9gItFTe+5VJOpgt7k/SQrVvjOntIypvKm/9bU/c2kPooM7R
Vw5i6z7nij+QscS5RjAOqpf35N8nVfqRlw98n1uZ0JE38vYk7yN/0L410BH69JA/EFYn79bPapG3
CTuakzXzboDIG3kjb4Q+1UKvVt6d/QG9tz8Qx6TNp1beWVTXDRB5I2/kjdBnBs99zg3knT4qRvU9
O/LOGkC9hx6PvOxivP2rBeSNvJE3Qp/5Ch15I28teWexpPhdXkLeyBt5N/O0QAVs/TwsHaWRN/L2
PApclVbVXo/1VwuPJPtxDfJG3rlj1B9cZIyiQp+JKn0JeU/ldPtJM4FX3mHNfzDO5+0j5I28ETq0
csyffIYXeU/XZC2sWd5Z223OMrnIG3kDQp8ivt7Zv6eEvKeNnXTAu7qBg7P/YNwfXYy3f72AvJE3
IPTZJZF4zKcwtaTn9kwDBaQfjJuFMBzyBoQOUD8ZgbXBkZcPrvcuuIYuTJJU01FaVXven3pbwCJv
5I3QAWZO3llp8/SRGq9CP/Lyiyvbv1wYdIyT7TryLqjSY88fdb+yY0TeyBuhAyDvHLrbv1gIEwkP
vFfptkKvpsNavcE45I28AaEDjMh7r3Wurbw7OWnz9N/7FnpxMK6+9qj+g3EvvRhv/2YBeSNvQOgA
lco7T3KPed71dNA9U7O8s7a9pLTtnsq2kTfyRugAyNtiAD2tJLhGSiKppqO0qvb8jnKhI2/kjdAB
kLdwcPZ/GfqPL65s/2ooGNesx7nqD8Yhb+QNCB1aLvJfhN/vNKe3+QHd7V8uhEde5jkYFzgE46oR
ULXBOOSNvAGhw1Rydc3yLqpaB563ad4xrloB+b8i8ZKL8faTC8gbeQNChxkiHfCiRvQ2H922bjCu
WQKqJxiHvAEQOkwTO709oTducD6t8I69hh3jJZJqOkqraoXj1W0Bi7wBoQOYMxRY6xx5ufee8/0m
iW3obzUWLlnZ/o2njnH+5dbspVSRNyB0AGd5p1XzeNr8MYVB3//g6Oc57+72rxfCRMIDhao1rPQY
zWjOUqrIGxA6gFd551VxfqvWl13sbf9yoW55Fx2vb6HbB+OquVzt/9wuXoy3+wv1HR/yBoQOyHuc
S4G1cPsXC910sRHPu5cOrFHN8s4TerXBuPruNesvpYq8ARA61CLvooE/Vhj0o5rlnYVuMK5hvc2T
ajpKq2rP+1JHC1jkDQgdkLfBABp1/N9HX07+97qa5V3J41xHXnpxZftJj8E4v895N28pVeQNCB2Q
txd5Z1FdeKr+1Ht3+zcLYSLhgcLxhkr7LPn86g/GIW8AhI68VeSdV8X5rVr/+OJg+1cLK8m+dGuU
d9Hx+hZ6eTCung5r/s/tH16Mt3+7gLwBEPoME+wOrk/ULO8swu1fLnSPvMxzMC7oFDeYqfYYxyWn
G4xrTntUvWBc8baRNwBCn17SBi7bvwjrlnfRwB973uqytdCruSSvE4wLaj7GnO0m1XSUVtUKk7Ul
5A2A0GeYHbOEcPX3mqOOTtK9Scc4XKH7nay95OLKdi8jGNeM1LvGZO2Tyes+5A1gxu/xEUwlvczB
efzlc+A327ZueKoZx3hAd/vJhVBBsj2VY5R/dt7PbSLyATIHQOizTr8RYpv8W40+54Nkuys1y7uy
Kr3j63Eu/9+PJX52AAgdNKq4+uWdRbj96wWNRHqvkmO0366G5GLxMepMfBA6AEIH71XryxzCSf7l
XWUlt9wAeWdtt7qlVOs7xoNJxr3bv1O4xQAAxhCKm17yg3H1tkeNOlUE45oTFPM7WVu8uLLd3w/G
1XOMcYe0OQBChxqE3oze5sPbXlTYdk95n13/vpvIN0wkPFA43rCCY0TeAAgdaicoCU9N1+Ncg+0n
F9KGNV2lfZb8fTOWUkXeAAgdWl2h1y3vLMLt3yx008VGFI43qlneeUKvdilV5A2A0GF6OPLHF+Pt
Xy/ULe8iycWeDzm7Y1z97VH1g3FB6d/29j8f5A2A0KHFVbrbJW7dqj7qaATjgkYd4/Dkxe9k7Q8v
rmz/LnMp1d5Y5R3zEwBA6DBLQq/+knz1wbj6bjt0t3+7ECYSHnh+1/Qyfhd5AwBCnw36DRLbMP6r
1qWLg+3efjCuWZmBg+P1KvRE4Nfz9QaAYWgsM81o9f3udKQNTMLtJxU6xgUOl931jlF1AgMAgNBn
iCMv9XQZtj29v5cbdowH2zrNtxEAEDpIsUs1V9c6NKrtWKtvj0qFDgDqcA99NoQubwFrK7ZydJdS
rfcYSZsDAEIH7/RVxWa7bc3HuRYvDrZ/K+wYh7wBAKFDI/Hd59xfb/hwu7/QTRcbUajSI+QNAAgd
poojL7kYbz/p2DFOf2GX6jrGIW8AQOgwBZQ3mKlnVbaoU8VSqsgbABA6TKXQm7Okqk4wDnkDAEKH
qaRsKdVq5J2FRp/zwfbvLgXjkDcAIHSYugq9bnlnEW7/dqGbLjbieQ+uYlUxAJi92g1mgu3+wk5D
v0FXJUKncgYAEEKnuFmu0l3k7b/DWsSpAQBA6KAl9Oraoy5yagAA5HAPfXboF0pWA7O0+TKnBgAA
oYNthU5vcwCAqYRQ3Ayx/VtPwTjkDQBAhQ61V+l2z34jbwAAhA4tEzryBgBA6NAK+sgbAAChQ9vJ
XkoVeQMATMUQDzPF9u8WHkHeAAAAAAAAAAAAAAAAAACZ/H8BBgAgCZajFsmZRwAAAABJRU5ErkJg
gg==
"
       id="image56" />
  </g>
</svg>
 + mediatype: image/svg+xml + install: + spec: + clusterPermissions: + - rules: + - nonResourceURLs: + - /api/v2/alerts + verbs: + - create + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - config.openshift.io + resources: + - apiservers + - dnses + - proxies + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - loki.grafana.com + resources: + - alertingrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - loki.grafana.com + resources: + - alertingrules/finalizers + verbs: + - update + - apiGroups: + - loki.grafana.com + resources: + - alertingrules/status + verbs: + - get + - patch + - update + - apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - loki.grafana.com + resources: + - lokistacks/finalizers + verbs: + - update + - apiGroups: + - loki.grafana.com + resources: + - lokistacks/status + verbs: + - get + - patch + - update + - apiGroups: + - loki.grafana.com + resources: + - recordingrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - loki.grafana.com + resources: + - recordingrules/finalizers + verbs: + - update + - apiGroups: + - loki.grafana.com + resources: + - recordingrules/status + verbs: + - get + - patch + - update + - apiGroups: + - loki.grafana.com + resources: + - rulerconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - loki.grafana.com + resources: + - rulerconfigs/finalizers + verbs: + - update + - apiGroups: + - loki.grafana.com + resources: + - rulerconfigs/status + verbs: + - get + - patch + - update + - apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + verbs: + - patch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + - servicemonitors + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - get + - list + - update + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: default + deployments: + - label: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + control-plane: controller-manager + name: 
loki-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + name: loki-operator-controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + name: loki-operator-controller-manager + spec: + containers: + - args: + - --config=controller_manager_config.yaml + command: + - /manager + env: + - name: RELATED_IMAGE_LOKI + value: docker.io/grafana/loki:2.7.3 + - name: RELATED_IMAGE_GATEWAY + value: quay.io/observatorium/api:latest + image: docker.io/grafana/loki-operator:main-39f2856 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8080 + name: metrics + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: webhook-cert + readOnly: true + - mountPath: /controller_manager_config.yaml + name: manager-config + subPath: controller_manager_config.yaml + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: quay.io/openshift/origin-kube-rbac-proxy:latest + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + terminationGracePeriodSeconds: 10 + volumes: + - name: webhook-cert + secret: + defaultMode: 420 + secretName: loki-operator-controller-manager-service-cert + - configMap: + name: loki-operator-manager-config + name: manager-config + permissions: + - rules: + - apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: default + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - logging + - loki + links: + - name: Documentation + url: https://loki-operator.dev/ + maintainers: + - email: loki-operator-team@googlegroups.com + name: Grafana Loki SIG Operator + maturity: alpha + minKubeVersion: 1.21.1 + provider: + name: Grafana Loki SIG Operator + relatedImages: + - image: docker.io/grafana/loki:2.7.3 + name: loki + - image: quay.io/observatorium/api:latest + name: gateway + version: 0.1.0 + webhookdefinitions: + - admissionReviewVersions: + - v1 + - v1beta1 + containerPort: 443 + conversionCRDs: + - lokistacks.loki.grafana.com + deploymentName: loki-operator-controller-manager + generateName: clokistacks.kb.io + sideEffects: None + targetPort: 9443 + type: ConversionWebhook + webhookPath: /convert + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: 
loki-operator-controller-manager + failurePolicy: Fail + generateName: valertingrule.loki.grafana.com + rules: + - apiGroups: + - loki.grafana.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - alertingrules + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-loki-grafana-com-v1beta1-alertingrule + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: loki-operator-controller-manager + failurePolicy: Fail + generateName: vlokistack.loki.grafana.com + rules: + - apiGroups: + - loki.grafana.com + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - lokistacks + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-loki-grafana-com-v1-lokistack + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: loki-operator-controller-manager + failurePolicy: Fail + generateName: vrecordingrule.loki.grafana.com + rules: + - apiGroups: + - loki.grafana.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - recordingrules + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-loki-grafana-com-v1beta1-recordingrule + - admissionReviewVersions: + - v1 + containerPort: 443 + deploymentName: loki-operator-controller-manager + failurePolicy: Fail + generateName: vrulerconfig.loki.grafana.com + rules: + - apiGroups: + - loki.grafana.com + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - rulerconfigs + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-loki-grafana-com-v1beta1-rulerconfig diff --git a/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml new file mode 100644 index 000000000000..f53562a26984 --- /dev/null +++ b/operator/bundle/community/manifests/loki.grafana.com_alertingrules.yaml @@ -0,0 +1,194 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: alertingrules.loki.grafana.com +spec: + group: loki.grafana.com + names: + kind: AlertingRule + listKind: AlertingRuleList + plural: alertingrules + singular: alertingrule + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AlertingRule is the Schema for the alertingrules API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlertingRuleSpec defines the desired state of AlertingRule + properties: + groups: + description: List of groups for alerting rules. + items: + description: AlertingRuleGroup defines a group of Loki alerting + rules. + properties: + interval: + default: 1m + description: Interval defines the time interval between evaluation + of the given alerting rule. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + limit: + description: Limit defines the number of alerts an alerting + rule can produce. 0 is no limit. + format: int32 + type: integer + name: + description: Name of the alerting rule group. Must be unique + within all alerting rules. + type: string + rules: + description: Rules defines a list of alerting rules + items: + description: AlertingRuleGroupSpec defines the spec for a + Loki alerting rule. + properties: + alert: + description: The name of the alert. Must be a valid label + value. + type: string + annotations: + additionalProperties: + type: string + description: Annotations to add to each alert. + type: object + expr: + description: The LogQL expression to evaluate. Every evaluation + cycle this is evaluated at the current time, and all + resultant time series become pending/firing alerts. + type: string + for: + description: Alerts are considered firing once they have + been returned for this long. Alerts which have not yet + fired for long enough are considered pending. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + labels: + additionalProperties: + type: string + description: Labels to add to each alert. + type: object + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + tenantID: + description: TenantID of tenant where the alerting rules are evaluated + in. + type: string + required: + - tenantID + type: object + status: + description: AlertingRuleStatus defines the observed state of AlertingRule + properties: + conditions: + description: Conditions of the AlertingRule generation health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml new file mode 100644 index 000000000000..23f2a59e0850 --- /dev/null +++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml @@ -0,0 +1,2478 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: lokistacks.loki.grafana.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: loki-operator-webhook-service + namespace: loki-operator + path: /convert + port: 443 + conversionReviewVersions: + - v1 + - v1beta1 + group: loki.grafana.com + names: + categories: + - logging + kind: LokiStack + listKind: LokiStackList + plural: lokistacks + singular: lokistack + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: LokiStack is the Schema for the lokistacks API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LokiStack CR spec field. + properties: + limits: + description: Limits defines the limits to be applied to log stream + processing. + properties: + global: + description: Global defines the limits applied globally across + the cluster. + properties: + ingestion: + description: IngestionLimits defines the limits applied on + ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be set + to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum number + of characters allowed for label keys in log streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum number + of characters allowed for label values in log streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum of + unique series that is returned by a metric query. + format: int32 + type: integer + queryTimeout: + default: 1m + description: Timeout when querying ingesters or storage + during the execution of a query request. + type: string + type: object + retention: + description: Retention defines how long logs are kept in storage. + properties: + days: + description: Days contains the number of days logs are + kept. + minimum: 1 + type: integer + streams: + description: Stream defines the log stream. + items: + description: RetentionStreamSpec defines a log stream + with separate retention time. + properties: + days: + description: Days contains the number of days logs + are kept. + minimum: 1 + type: integer + priority: + default: 1 + description: Priority defines the priority of this + selector compared to other retention rules. + format: int32 + type: integer + selector: + description: Selector contains the LogQL query used + to define the log stream. 
+ type: string + required: + - days + - selector + type: object + type: array + required: + - days + type: object + type: object + tenants: + additionalProperties: + description: LimitsTemplateSpec defines the limits applied + at ingestion or query path. + properties: + ingestion: + description: IngestionLimits defines the limits applied + on ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be + set to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum + number of characters allowed for label keys in log + streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum + number of characters allowed for label values in log + streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a + query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum + of unique series that is returned by a metric query. + format: int32 + type: integer + queryTimeout: + default: 1m + description: Timeout when querying ingesters or storage + during the execution of a query request. + type: string + type: object + retention: + description: Retention defines how long logs are kept in + storage. + properties: + days: + description: Days contains the number of days logs are + kept. + minimum: 1 + type: integer + streams: + description: Stream defines the log stream. + items: + description: RetentionStreamSpec defines a log stream + with separate retention time. + properties: + days: + description: Days contains the number of days + logs are kept. + minimum: 1 + type: integer + priority: + default: 1 + description: Priority defines the priority of + this selector compared to other retention rules. + format: int32 + type: integer + selector: + description: Selector contains the LogQL query + used to define the log stream. + type: string + required: + - days + - selector + type: object + type: array + required: + - days + type: object + type: object + description: Tenants defines the limits applied per tenant. + type: object + type: object + managementState: + default: Managed + description: ManagementState defines if the CR should be managed by + the operator or not. Default is managed. 
+ enum: + - Managed + - Unmanaged + type: string + proxy: + description: Proxy defines the spec for the object proxy to configure + cluster proxy information. + properties: + httpProxy: + description: HTTPProxy configures the HTTP_PROXY/http_proxy env + variable. + type: string + httpsProxy: + description: HTTPSProxy configures the HTTPS_PROXY/https_proxy + env variable. + type: string + noProxy: + description: NoProxy configures the NO_PROXY/no_proxy env variable. + type: string + type: object + replicationFactor: + description: ReplicationFactor defines the policy for log stream replication. + format: int32 + minimum: 1 + type: integer + rules: + description: Rules defines the spec for the ruler component + properties: + enabled: + description: Enabled defines a flag to enable/disable the ruler + component + type: boolean + namespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. + If unspecified, only the same namespace as the LokiStack object + is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + selector: + description: A selector to select which LokiRules to mount for + loading alerting/recording rules from. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - enabled + type: object + size: + description: Size defines one of the support Loki deployment scale + out sizes. + enum: + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + storage: + description: Storage defines the spec for the object storage endpoint + to store logs. + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + description: ObjectStorageSchema defines the requirements needed + to configure a new storage schema. + properties: + effectiveDate: + description: EffectiveDate is the date in UTC that the schema + will be applied on. To ensure readibility of logs, this + date should be before the current date in UTC. + pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$ + type: string + version: + description: Version for writing and reading logs. + enum: + - v11 + - v12 + type: string + required: + - effectiveDate + - version + type: object + minItems: 1 + type: array + secret: + description: Secret for object storage authentication. Name of + a secret in the same namespace as the LokiStack custom resource. + properties: + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + enum: + - azure + - gcs + - s3 + - swift + type: string + required: + - name + - type + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caKey: + description: Key is the data key of a ConfigMap containing + a CA certificate. It needs to be in the same namespace as + the LokiStack custom resource. If empty, it defaults to + "service-ca.crt". + type: string + caName: + description: CA is the name of a ConfigMap containing a CA + certificate. It needs to be in the same namespace as the + LokiStack custom resource. + type: string + required: + - caName + type: object + required: + - secret + type: object + storageClassName: + description: Storage class name defines the storage class for ingester/querier + PVCs. + type: string + template: + description: Template defines the resource/limits/tolerations/nodeselectors + per component + properties: + compactor: + description: Compactor defines the compaction component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. 
If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + distributor: + description: Distributor defines the distributor component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + gateway: + description: Gateway defines the lokistack gateway component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. 
+ items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexGateway: + description: IndexGateway defines the index gateway component + spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + type: object + ingester: + description: Ingester defines the ingester component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + querier: + description: Querier defines the querier component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + queryFrontend: + description: QueryFrontend defines the query frontend component + spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + ruler: + description: Ruler defines the ruler component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. 
+ type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + tenants: + description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + properties: + authentication: + description: Authentication defines the lokistack-gateway component + authentication configuration spec per tenant. + items: + description: AuthenticationSpec defines the oidc configuration + per tenant for lokiStack Gateway component. + properties: + oidc: + description: OIDC defines the spec for the OIDC tenant's + authentication. + properties: + groupClaim: + description: Group claim field from ID Token + type: string + issuerURL: + description: IssuerURL defines the URL for issuer. + type: string + redirectURL: + description: RedirectURL defines the URL for redirect. + type: string + secret: + description: Secret defines the spec for the clientID, + clientSecret and issuerCAPath for tenant's authentication. + properties: + name: + description: Name of a secret in the namespace configured + for tenant secrets. + type: string + required: + - name + type: object + usernameClaim: + description: User claim field from ID Token + type: string + required: + - issuerURL + - secret + type: object + tenantId: + description: TenantID defines the id of the tenant. + type: string + tenantName: + description: TenantName defines the name of the tenant. + type: string + required: + - oidc + - tenantId + - tenantName + type: object + type: array + authorization: + description: Authorization defines the lokistack-gateway component + authorization configuration spec per tenant. + properties: + opa: + description: OPA defines the spec for the third-party endpoint + for tenant's authorization. + properties: + url: + description: URL defines the third-party endpoint for + authorization. + type: string + required: + - url + type: object + roleBindings: + description: RoleBindings defines configuration to bind a + set of roles to a set of subjects. + items: + description: RoleBindingsSpec binds a set of roles to a + set of subjects. + properties: + name: + type: string + roles: + items: + type: string + type: array + subjects: + items: + description: Subject represents a subject that has + been bound to a role. + properties: + kind: + description: SubjectKind is a kind of LokiStack + Gateway RBAC subject. 
+ enum: + - user + - group + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles defines a set of permissions to interact + with a tenant. + items: + description: RoleSpec describes a set of permissions to + interact with a tenant. + properties: + name: + type: string + permissions: + items: + description: PermissionType is a LokiStack Gateway + RBAC permission. + enum: + - read + - write + type: string + type: array + resources: + items: + type: string + type: array + tenants: + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + type: object + mode: + default: openshift-logging + description: Mode defines the mode in which lokistack-gateway + component will be configured. + enum: + - static + - dynamic + - openshift-logging + - openshift-network + type: string + required: + - mode + type: object + required: + - size + - storage + - storageClassName + type: object + status: + description: LokiStack CR spec Status. + properties: + components: + description: Components provides summary of all Loki pod status grouped + per component. + properties: + compactor: + additionalProperties: + items: + type: string + type: array + description: Compactor is a map to the pod status of the compactor + pod. + type: object + distributor: + additionalProperties: + items: + type: string + type: array + description: Distributor is a map to the per pod status of the + distributor deployment + type: object + gateway: + additionalProperties: + items: + type: string + type: array + description: Gateway is a map to the per pod status of the lokistack + gateway deployment. + type: object + indexGateway: + additionalProperties: + items: + type: string + type: array + description: IndexGateway is a map to the per pod status of the + index gateway statefulset + type: object + ingester: + additionalProperties: + items: + type: string + type: array + description: Ingester is a map to the per pod status of the ingester + statefulset + type: object + querier: + additionalProperties: + items: + type: string + type: array + description: Querier is a map to the per pod status of the querier + deployment + type: object + queryFrontend: + additionalProperties: + items: + type: string + type: array + description: QueryFrontend is a map to the per pod status of the + query frontend deployment + type: object + ruler: + additionalProperties: + items: + type: string + type: array + description: Ruler is a map to the per pod status of the lokistack + ruler statefulset. + type: object + type: object + conditions: + description: Conditions of the Loki deployment health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + storage: + description: Storage provides summary of all changes that have occurred + to the storage configuration. + properties: + schemas: + description: Schemas is a list of schemas which have been applied + to the LokiStack. + items: + description: ObjectStorageSchema defines the requirements needed + to configure a new storage schema. + properties: + effectiveDate: + description: EffectiveDate is the date in UTC that the schema + will be applied on. To ensure readibility of logs, this + date should be before the current date in UTC. + pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$ + type: string + version: + description: Version for writing and reading logs. + enum: + - v11 + - v12 + type: string + required: + - effectiveDate + - version + type: object + type: array + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + description: LokiStack is the Schema for the lokistacks API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LokiStackSpec defines the desired state of LokiStack + properties: + limits: + description: Limits defines the per-tenant limits to be applied to + log stream processing and the per-tenant the config overrides. + properties: + global: + description: Global defines the limits applied globally across + the cluster. + properties: + ingestion: + description: IngestionLimits defines the limits applied on + ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be set + to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. + format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum number + of characters allowed for label keys in log streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum number + of characters allowed for label values in log streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum of + unique series that is returned by a metric query. + format: int32 + type: integer + type: object + type: object + tenants: + additionalProperties: + description: LimitsTemplateSpec defines the limits and overrides + applied per-tenant. + properties: + ingestion: + description: IngestionLimits defines the limits applied + on ingested log streams. + properties: + ingestionBurstSize: + description: IngestionBurstSize defines the local rate-limited + sample size per distributor replica. It should be + set to the set at least to the maximum logs size expected + in a single push request. + format: int32 + type: integer + ingestionRate: + description: IngestionRate defines the sample size per + second. Units MB. 
+ format: int32 + type: integer + maxGlobalStreamsPerTenant: + description: MaxGlobalStreamsPerTenant defines the maximum + number of active streams per tenant, across the cluster. + format: int32 + type: integer + maxLabelNameLength: + description: MaxLabelNameLength defines the maximum + number of characters allowed for label keys in log + streams. + format: int32 + type: integer + maxLabelNamesPerSeries: + description: MaxLabelNamesPerSeries defines the maximum + number of label names per series in each log stream. + format: int32 + type: integer + maxLabelValueLength: + description: MaxLabelValueLength defines the maximum + number of characters allowed for label values in log + streams. + format: int32 + type: integer + maxLineSize: + description: MaxLineSize defines the maximum line size + on ingestion path. Units in Bytes. + format: int32 + type: integer + type: object + queries: + description: QueryLimits defines the limit applied on querying + log streams. + properties: + maxChunksPerQuery: + description: MaxChunksPerQuery defines the maximum number + of chunks that can be fetched by a single query. + format: int32 + type: integer + maxEntriesLimitPerQuery: + description: MaxEntriesLimitsPerQuery defines the maximum + number of log entries that will be returned for a + query. + format: int32 + type: integer + maxQuerySeries: + description: MaxQuerySeries defines the the maximum + of unique series that is returned by a metric query. + format: int32 + type: integer + type: object + type: object + description: Tenants defines the limits and overrides applied + per tenant. + type: object + type: object + managementState: + default: Managed + description: ManagementState defines if the CR should be managed by + the operator or not. Default is managed. + enum: + - Managed + - Unmanaged + type: string + replicationFactor: + default: 1 + description: ReplicationFactor defines the policy for log stream replication. + format: int32 + minimum: 1 + type: integer + rules: + description: Rules defines the spec for the ruler component + properties: + enabled: + description: Enabled defines a flag to enable/disable the ruler + component + type: boolean + namespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. + If unspecified, only the same namespace as the LokiStack object + is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + selector: + description: A selector to select which LokiRules to mount for + loading alerting/recording rules from. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - enabled + type: object + size: + description: Size defines one of the support Loki deployment scale + out sizes. + enum: + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + storage: + description: Storage defines the spec for the object storage endpoint + to store logs. + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + description: ObjectStorageSchema defines the requirements needed + to configure a new storage schema. + properties: + effectiveDate: + description: EffectiveDate is the date in UTC that the schema + will be applied on. To ensure readibility of logs, this + date should be before the current date in UTC. + pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$ + type: string + version: + description: Version for writing and reading logs. + enum: + - v11 + - v12 + type: string + required: + - effectiveDate + - version + type: object + minItems: 1 + type: array + secret: + description: Secret for object storage authentication. Name of + a secret in the same namespace as the LokiStack custom resource. + properties: + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + enum: + - azure + - gcs + - s3 + - swift + type: string + required: + - name + - type + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caName: + description: CA is the name of a ConfigMap containing a CA + certificate. It needs to be in the same namespace as the + LokiStack custom resource. 
+ type: string + type: object + required: + - secret + type: object + storageClassName: + description: Storage class name defines the storage class for ingester/querier + PVCs. + type: string + template: + description: Template defines the resource/limits/tolerations/nodeselectors + per component + properties: + compactor: + description: Compactor defines the compaction component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + distributor: + description: Distributor defines the distributor component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. 
+ Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + gateway: + description: Gateway defines the lokistack gateway component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexGateway: + description: IndexGateway defines the index gateway component + spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. 
When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + ingester: + description: Ingester defines the ingester component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + querier: + description: Querier defines the querier component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. 
+ type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + queryFrontend: + description: QueryFrontend defines the query frontend component + spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + ruler: + description: Ruler defines the ruler component spec. + properties: + nodeSelector: + additionalProperties: + type: string + description: NodeSelector defines the labels required by a + node to schedule the component onto it. + type: object + replicas: + description: Replicas defines the number of replica pods of + the component. + format: int32 + type: integer + tolerations: + description: Tolerations defines the tolerations required + by a node to schedule the component onto it. + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + tenants: + description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + properties: + authentication: + description: Authentication defines the lokistack-gateway component + authentication configuration spec per tenant. + items: + description: AuthenticationSpec defines the oidc configuration + per tenant for lokiStack Gateway component. + properties: + oidc: + description: OIDC defines the spec for the OIDC tenant's + authentication. + properties: + groupClaim: + description: Group claim field from ID Token + type: string + issuerURL: + description: IssuerURL defines the URL for issuer. + type: string + redirectURL: + description: RedirectURL defines the URL for redirect. + type: string + secret: + description: Secret defines the spec for the clientID, + clientSecret and issuerCAPath for tenant's authentication. + properties: + name: + description: Name of a secret in the namespace configured + for tenant secrets. + type: string + required: + - name + type: object + usernameClaim: + description: User claim field from ID Token + type: string + required: + - issuerURL + - secret + type: object + tenantId: + description: TenantID defines the id of the tenant. 
+ type: string + tenantName: + description: TenantName defines the name of the tenant. + type: string + required: + - oidc + - tenantId + - tenantName + type: object + type: array + authorization: + description: Authorization defines the lokistack-gateway component + authorization configuration spec per tenant. + properties: + opa: + description: OPA defines the spec for the third-party endpoint + for tenant's authorization. + properties: + url: + description: URL defines the third-party endpoint for + authorization. + type: string + required: + - url + type: object + roleBindings: + description: RoleBindings defines configuration to bind a + set of roles to a set of subjects. + items: + description: RoleBindingsSpec binds a set of roles to a + set of subjects. + properties: + name: + type: string + roles: + items: + type: string + type: array + subjects: + items: + description: Subject represents a subject that has + been bound to a role. + properties: + kind: + description: SubjectKind is a kind of LokiStack + Gateway RBAC subject. + enum: + - user + - group + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles defines a set of permissions to interact + with a tenant. + items: + description: RoleSpec describes a set of permissions to + interact with a tenant. + properties: + name: + type: string + permissions: + items: + description: PermissionType is a LokiStack Gateway + RBAC permission. + enum: + - read + - write + type: string + type: array + resources: + items: + type: string + type: array + tenants: + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + type: object + mode: + default: openshift-logging + description: Mode defines the mode in which lokistack-gateway + component will be configured. + enum: + - static + - dynamic + - openshift-logging + type: string + required: + - mode + type: object + required: + - size + - storage + - storageClassName + type: object + status: + description: LokiStackStatus defines the observed state of LokiStack + properties: + components: + description: Components provides summary of all Loki pod status grouped + per component. + properties: + compactor: + additionalProperties: + items: + type: string + type: array + description: Compactor is a map to the pod status of the compactor + pod. + type: object + distributor: + additionalProperties: + items: + type: string + type: array + description: Distributor is a map to the per pod status of the + distributor deployment + type: object + gateway: + additionalProperties: + items: + type: string + type: array + description: Gateway is a map to the per pod status of the lokistack + gateway deployment. 
+ type: object + indexGateway: + additionalProperties: + items: + type: string + type: array + description: IndexGateway is a map to the per pod status of the + index gateway statefulset + type: object + ingester: + additionalProperties: + items: + type: string + type: array + description: Ingester is a map to the per pod status of the ingester + statefulset + type: object + querier: + additionalProperties: + items: + type: string + type: array + description: Querier is a map to the per pod status of the querier + deployment + type: object + queryFrontend: + additionalProperties: + items: + type: string + type: array + description: QueryFrontend is a map to the per pod status of the + query frontend deployment + type: object + ruler: + additionalProperties: + items: + type: string + type: array + description: Ruler is a map to the per pod status of the lokistack + ruler statefulset. + type: object + type: object + conditions: + description: Conditions of the Loki deployment health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + storage: + description: Storage provides summary of all changes that have occurred + to the storage configuration. + properties: + schemas: + description: Schemas is a list of schemas which have been applied + to the LokiStack. + items: + description: ObjectStorageSchema defines the requirements needed + to configure a new storage schema. + properties: + effectiveDate: + description: EffectiveDate is the date in UTC that the schema + will be applied on. To ensure readibility of logs, this + date should be before the current date in UTC. + pattern: ^([0-9]{4,})([-]([0-9]{2})){2}$ + type: string + version: + description: Version for writing and reading logs. + enum: + - v11 + - v12 + type: string + required: + - effectiveDate + - version + type: object + type: array + type: object + type: object + type: object + served: false + storage: false + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml new file mode 100644 index 000000000000..b4c32636d9fb --- /dev/null +++ b/operator/bundle/community/manifests/loki.grafana.com_recordingrules.yaml @@ -0,0 +1,178 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: recordingrules.loki.grafana.com +spec: + group: loki.grafana.com + names: + kind: RecordingRule + listKind: RecordingRuleList + plural: recordingrules + singular: recordingrule + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: RecordingRule is the Schema for the recordingrules API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RecordingRuleSpec defines the desired state of RecordingRule + properties: + groups: + description: List of groups for recording rules. + items: + description: RecordingRuleGroup defines a group of Loki recording + rules. + properties: + interval: + default: 1m + description: Interval defines the time interval between evaluation + of the given recoding rule. 
+ pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + limit: + description: Limit defines the number of series a recording + rule can produce. 0 is no limit. + format: int32 + type: integer + name: + description: Name of the recording rule group. Must be unique + within all recording rules. + type: string + rules: + description: Rules defines a list of recording rules + items: + description: RecordingRuleGroupSpec defines the spec for a + Loki recording rule. + properties: + expr: + description: The LogQL expression to evaluate. Every evaluation + cycle this is evaluated at the current time, and all + resultant time series become pending/firing alerts. + type: string + record: + description: The name of the time series to output to. + Must be a valid metric name. + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + tenantID: + description: TenantID of tenant where the recording rules are evaluated + in. + type: string + required: + - tenantID + type: object + status: + description: RecordingRuleStatus defines the observed state of RecordingRule + properties: + conditions: + description: Conditions of the RecordingRule generation health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml new file mode 100644 index 000000000000..d9f182e10862 --- /dev/null +++ b/operator/bundle/community/manifests/loki.grafana.com_rulerconfigs.yaml @@ -0,0 +1,693 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + labels: + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/managed-by: operator-lifecycle-manager + app.kubernetes.io/name: loki-operator + app.kubernetes.io/part-of: loki-operator + app.kubernetes.io/version: 0.1.0 + name: rulerconfigs.loki.grafana.com +spec: + group: loki.grafana.com + names: + kind: RulerConfig + listKind: RulerConfigList + plural: rulerconfigs + singular: rulerconfig + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: RulerConfig is the Schema for the rulerconfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RulerConfigSpec defines the desired state of Ruler + properties: + alertmanager: + description: Defines alert manager configuration to notify on firing + alerts. + properties: + client: + description: Client configuration for reaching the alertmanager + endpoint. + properties: + basicAuth: + description: Basic authentication configuration for reaching + the alertmanager endpoints. + properties: + password: + description: The subject's password for the basic authentication + configuration. + type: string + username: + description: The subject's username for the basic authentication + configuration. + type: string + type: object + headerAuth: + description: Header authentication configuration for reaching + the alertmanager endpoints. + properties: + credentials: + description: The credentials for the header authentication + configuration. + type: string + credentialsFile: + description: The credentials file for the Header authentication + configuration. It is mutually exclusive with `credentials`. 
+ type: string + type: + description: The authentication type for the header authentication + configuration. + type: string + type: object + tls: + description: TLS configuration for reaching the alertmanager + endpoints. + properties: + caPath: + description: The CA certificate file path for the TLS + configuration. + type: string + certPath: + description: The client-side certificate file path for + the TLS configuration. + type: string + keyPath: + description: The client-side key file path for the TLS + configuration. + type: string + serverName: + description: The server name to validate in the alertmanager + server certificates. + type: string + type: object + type: object + discovery: + description: Defines the configuration for DNS-based discovery + of AlertManager hosts. + properties: + enableSRV: + description: Use DNS SRV records to discover Alertmanager + hosts. + type: boolean + refreshInterval: + default: 1m + description: How long to wait between refreshing DNS resolutions + of Alertmanager hosts. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + type: object + enableV2: + description: If enabled, then requests to Alertmanager use the + v2 API. + type: boolean + endpoints: + description: List of AlertManager URLs to send notifications to. + Each Alertmanager URL is treated as a separate group in the + configuration. Multiple Alertmanagers in HA per group can be + supported by using DNS resolution (See EnableDNSDiscovery). + items: + type: string + type: array + externalLabels: + additionalProperties: + type: string + description: Additional labels to add to all alerts. + type: object + externalUrl: + description: URL for alerts return path. + type: string + notificationQueue: + description: Defines the configuration for the notification queue + to AlertManager hosts. + properties: + capacity: + default: 10000 + description: Capacity of the queue for notifications to be + sent to the Alertmanager. + format: int32 + type: integer + forGracePeriod: + default: 10m + description: Minimum duration between alert and restored "for" + state. This is maintained only for alerts with configured + "for" time greater than the grace period. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + forOutageTolerance: + default: 1h + description: Max time to tolerate outage for restoring "for" + state of alert. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + resendDelay: + default: 1m + description: Minimum amount of time to wait before resending + an alert to Alertmanager. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + timeout: + default: 10s + description: HTTP timeout duration when sending notifications + to the Alertmanager. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + type: object + relabelConfigs: + description: List of alert relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + `` and `` sections + of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + default: replace + description: Action to perform based on regex matching. 
+ Default is 'replace' + enum: + - drop + - hashmod + - keep + - labeldrop + - labelkeep + - labelmap + - replace + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + default: (.*) + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + default: $1 + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + default: ; + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + required: + - sourceLabels + type: object + type: array + required: + - endpoints + type: object + evaluationInterval: + default: 1m + description: Interval on how frequently to evaluate rules. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + overrides: + additionalProperties: + description: RulerOverrides defines the overrides applied per-tenant. + properties: + alertmanager: + description: AlertManagerOverrides defines the overrides to + apply to the alertmanager config. + properties: + client: + description: Client configuration for reaching the alertmanager + endpoint. + properties: + basicAuth: + description: Basic authentication configuration for + reaching the alertmanager endpoints. + properties: + password: + description: The subject's password for the basic + authentication configuration. + type: string + username: + description: The subject's username for the basic + authentication configuration. + type: string + type: object + headerAuth: + description: Header authentication configuration for + reaching the alertmanager endpoints. + properties: + credentials: + description: The credentials for the header authentication + configuration. + type: string + credentialsFile: + description: The credentials file for the Header + authentication configuration. It is mutually exclusive + with `credentials`. + type: string + type: + description: The authentication type for the header + authentication configuration. + type: string + type: object + tls: + description: TLS configuration for reaching the alertmanager + endpoints. + properties: + caPath: + description: The CA certificate file path for the + TLS configuration. + type: string + certPath: + description: The client-side certificate file path + for the TLS configuration. + type: string + keyPath: + description: The client-side key file path for the + TLS configuration. + type: string + serverName: + description: The server name to validate in the + alertmanager server certificates. + type: string + type: object + type: object + discovery: + description: Defines the configuration for DNS-based discovery + of AlertManager hosts. + properties: + enableSRV: + description: Use DNS SRV records to discover Alertmanager + hosts. 
+ type: boolean + refreshInterval: + default: 1m + description: How long to wait between refreshing DNS + resolutions of Alertmanager hosts. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + type: object + enableV2: + description: If enabled, then requests to Alertmanager use + the v2 API. + type: boolean + endpoints: + description: List of AlertManager URLs to send notifications + to. Each Alertmanager URL is treated as a separate group + in the configuration. Multiple Alertmanagers in HA per + group can be supported by using DNS resolution (See EnableDNSDiscovery). + items: + type: string + type: array + externalLabels: + additionalProperties: + type: string + description: Additional labels to add to all alerts. + type: object + externalUrl: + description: URL for alerts return path. + type: string + notificationQueue: + description: Defines the configuration for the notification + queue to AlertManager hosts. + properties: + capacity: + default: 10000 + description: Capacity of the queue for notifications + to be sent to the Alertmanager. + format: int32 + type: integer + forGracePeriod: + default: 10m + description: Minimum duration between alert and restored + "for" state. This is maintained only for alerts with + configured "for" time greater than the grace period. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + forOutageTolerance: + default: 1h + description: Max time to tolerate outage for restoring + "for" state of alert. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + resendDelay: + default: 1m + description: Minimum amount of time to wait before resending + an alert to Alertmanager. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + timeout: + default: 10s + description: HTTP timeout duration when sending notifications + to the Alertmanager. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + type: object + relabelConfigs: + description: List of alert relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of + the label set, being applied to samples before ingestion. + It defines `` and `` + sections of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + default: replace + description: Action to perform based on regex matching. + Default is 'replace' + enum: + - drop + - hashmod + - keep + - labeldrop + - labelkeep + - labelmap + - replace + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + default: (.*) + description: Regular expression against which the + extracted value is matched. Default is '(.*)' + type: string + replacement: + default: $1 + description: Replacement value against which a regex + replace is performed if the regular expression matches. + Regex capture groups are available. Default is '$1' + type: string + separator: + default: ; + description: Separator placed between concatenated + source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from + existing labels. 
Their content is concatenated using + the configured separator and matched against the + configured regular expression for the replace, keep, + and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is + written in a replace action. It is mandatory for + replace actions. Regex capture groups are available. + type: string + required: + - sourceLabels + type: object + type: array + required: + - endpoints + type: object + type: object + description: Overrides defines the config overrides to be applied + per-tenant. + type: object + pollInterval: + default: 1m + description: Interval on how frequently to poll for new rule definitions. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + remoteWrite: + description: Defines a remote write endpoint to write recording rule + metrics. + properties: + client: + description: Defines the configuration for remote write client. + properties: + additionalHeaders: + additionalProperties: + type: string + description: Additional HTTP headers to be sent along with + each remote write request. + type: object + authorization: + description: Type of authorzation to use to access the remote + write endpoint + enum: + - basic + - header + type: string + authorizationSecretName: + description: Name of a secret in the namespace configured + for authorization secrets. + type: string + followRedirects: + default: true + description: Configure whether HTTP requests follow HTTP 3xx + redirects. + type: boolean + name: + description: Name of the remote write config, which if specified + must be unique among remote write configs. + type: string + proxyUrl: + description: Optional proxy URL. + type: string + relabelConfigs: + description: List of remote write relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of + the label set, being applied to samples before ingestion. + It defines `` and `` + sections of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + default: replace + description: Action to perform based on regex matching. + Default is 'replace' + enum: + - drop + - hashmod + - keep + - labeldrop + - labelkeep + - labelmap + - replace + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + default: (.*) + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + default: $1 + description: Replacement value against which a regex + replace is performed if the regular expression matches. + Regex capture groups are available. Default is '$1' + type: string + separator: + default: ; + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. 
+ type: string + required: + - sourceLabels + type: object + type: array + timeout: + default: 30s + description: Timeout for requests to the remote write endpoint. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + url: + description: The URL of the endpoint to send samples to. + type: string + required: + - authorization + - authorizationSecretName + - name + - url + type: object + enabled: + description: Enable remote-write functionality. + type: boolean + queue: + description: Defines the configuration for remote write client + queue. + properties: + batchSendDeadline: + default: 5s + description: Maximum time a sample will wait in buffer. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + capacity: + default: 2500 + description: Number of samples to buffer per shard before + we block reading of more + format: int32 + type: integer + maxBackOffPeriod: + default: 100ms + description: Maximum retry delay. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + maxSamplesPerSend: + default: 500 + description: Maximum number of samples per send. + format: int32 + type: integer + maxShards: + default: 200 + description: Maximum number of shards, i.e. amount of concurrency. + format: int32 + type: integer + minBackOffPeriod: + default: 30ms + description: Initial retry delay. Gets doubled for every retry. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + minShards: + default: 200 + description: Minimum number of shards, i.e. amount of concurrency. + format: int32 + type: integer + type: object + refreshPeriod: + default: 10s + description: Minimum period to wait between refreshing remote-write + reconfigurations. + pattern: ((([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?|0) + type: string + type: object + type: object + status: + description: RulerConfigStatus defines the observed state of RulerConfig + properties: + conditions: + description: Conditions of the RulerConfig health. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/operator/bundle/community/metadata/annotations.yaml b/operator/bundle/community/metadata/annotations.yaml new file mode 100644 index 000000000000..f17f3ab1caac --- /dev/null +++ b/operator/bundle/community/metadata/annotations.yaml @@ -0,0 +1,15 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: loki-operator + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.bundle.channel.default.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-unknown + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + + # Annotations for testing. + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/operator/bundle/tests/scorecard/config.yaml b/operator/bundle/community/tests/scorecard/config.yaml similarity index 100% rename from operator/bundle/tests/scorecard/config.yaml rename to operator/bundle/community/tests/scorecard/config.yaml diff --git a/operator/bundle.Dockerfile b/operator/bundle/openshift/bundle.Dockerfile similarity index 88% rename from operator/bundle.Dockerfile rename to operator/bundle/openshift/bundle.Dockerfile index 1b4d78931cb3..cddaca777930 100644 --- a/operator/bundle.Dockerfile +++ b/operator/bundle/openshift/bundle.Dockerfile @@ -16,6 +16,6 @@ LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ # Copy files to locations specified by labels. 
-COPY bundle/manifests /manifests/ -COPY bundle/metadata /metadata/ -COPY bundle/tests/scorecard /tests/scorecard/ +COPY ./manifests /manifests/ +COPY ./metadata /metadata/ +COPY ./tests/scorecard /tests/scorecard/ diff --git a/operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml rename to operator/bundle/openshift/manifests/loki-operator-controller-manager-metrics-service_v1_service.yaml diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/openshift/manifests/loki-operator-manager-config_v1_configmap.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml rename to operator/bundle/openshift/manifests/loki-operator-manager-config_v1_configmap.yaml diff --git a/operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml b/operator/bundle/openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml rename to operator/bundle/openshift/manifests/loki-operator-metrics-monitor_monitoring.coreos.com_v1_servicemonitor.yaml diff --git a/operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml rename to operator/bundle/openshift/manifests/loki-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml diff --git a/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml b/operator/bundle/openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml rename to operator/bundle/openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_role.yaml diff --git a/operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml b/operator/bundle/openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml rename to operator/bundle/openshift/manifests/loki-operator-prometheus_rbac.authorization.k8s.io_v1_rolebinding.yaml diff --git a/operator/bundle/manifests/loki-operator-webhook-service_v1_service.yaml b/operator/bundle/openshift/manifests/loki-operator-webhook-service_v1_service.yaml similarity index 100% rename from operator/bundle/manifests/loki-operator-webhook-service_v1_service.yaml rename to operator/bundle/openshift/manifests/loki-operator-webhook-service_v1_service.yaml diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml similarity index 99% rename from operator/bundle/manifests/loki-operator.clusterserviceversion.yaml rename to 
operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml index 1c7ccb508a29..abf17a210667 100644 --- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml +++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml @@ -172,7 +172,7 @@ metadata: operatorframework.io/arch.arm64: supported operatorframework.io/arch.ppc64le: supported operatorframework.io/arch.s390x: supported - name: loki-operator.v0.0.1 + name: loki-operator.v0.1.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1463,7 +1463,7 @@ spec: value: quay.io/observatorium/api:latest - name: RELATED_IMAGE_OPA value: quay.io/observatorium/opa-openshift:latest - image: quay.io/openshift-logging/loki-operator:v0.0.1 + image: quay.io/openshift-logging/loki-operator:v0.1.0 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -1572,12 +1572,12 @@ spec: - name: Loki Operator url: https://github.com/grafana/loki maintainers: - - email: loki-operator-team@googlegroups.com - name: Grafana Loki SIG Operator + - email: team-logging@redhat.com + name: Red Hat, AOS Logging maturity: alpha minKubeVersion: 1.21.1 provider: - name: Grafana.com + name: Red Hat relatedImages: - image: quay.io/openshift-logging/loki:v2.7.3 name: loki @@ -1585,7 +1585,7 @@ spec: name: gateway - image: quay.io/observatorium/opa-openshift:latest name: opa - version: 0.0.1 + version: 0.1.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/operator/bundle/manifests/loki.grafana.com_alertingrules.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_alertingrules.yaml similarity index 100% rename from operator/bundle/manifests/loki.grafana.com_alertingrules.yaml rename to operator/bundle/openshift/manifests/loki.grafana.com_alertingrules.yaml diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml similarity index 100% rename from operator/bundle/manifests/loki.grafana.com_lokistacks.yaml rename to operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml diff --git a/operator/bundle/manifests/loki.grafana.com_recordingrules.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_recordingrules.yaml similarity index 100% rename from operator/bundle/manifests/loki.grafana.com_recordingrules.yaml rename to operator/bundle/openshift/manifests/loki.grafana.com_recordingrules.yaml diff --git a/operator/bundle/manifests/loki.grafana.com_rulerconfigs.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml similarity index 100% rename from operator/bundle/manifests/loki.grafana.com_rulerconfigs.yaml rename to operator/bundle/openshift/manifests/loki.grafana.com_rulerconfigs.yaml diff --git a/operator/bundle/metadata/annotations.yaml b/operator/bundle/openshift/metadata/annotations.yaml similarity index 100% rename from operator/bundle/metadata/annotations.yaml rename to operator/bundle/openshift/metadata/annotations.yaml diff --git a/operator/bundle/metadata/properties.yaml b/operator/bundle/openshift/metadata/properties.yaml similarity index 100% rename from operator/bundle/metadata/properties.yaml rename to operator/bundle/openshift/metadata/properties.yaml diff --git a/operator/bundle/openshift/tests/scorecard/config.yaml b/operator/bundle/openshift/tests/scorecard/config.yaml new file mode 100644 index 000000000000..fde2af8b260c --- /dev/null +++ b/operator/bundle/openshift/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: 
scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.0 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml index 237a86462f8b..86cb69733899 100644 --- a/operator/config/manager/kustomization.yaml +++ b/operator/config/manager/kustomization.yaml @@ -6,4 +6,4 @@ kind: Kustomization images: - name: controller newName: quay.io/openshift-logging/loki-operator - newTag: v0.0.1 + newTag: v0.1.0 diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml new file mode 100644 index 000000000000..b32a0953fa8e --- /dev/null +++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml @@ -0,0 +1,1477 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Full Lifecycle + categories: OpenShift Optional, Logging & Tracing + certified: "false" + containerImage: docker.io/grafana/loki-operator:main-39f2856 + createdAt: "2022-12-22T13:28:40+00:00" + description: The Community Loki Operator provides Kubernetes native deployment + and management of Loki and related logging components. + repository: https://github.com/grafana/loki/tree/main/operator + support: Grafana Loki SIG Operator + labels: + operatorframework.io/arch.amd64: supported + operatorframework.io/arch.arm64: supported + name: loki-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: AlertingRule is the Schema for the alertingrules API + displayName: AlertingRule + kind: AlertingRule + name: alertingrules.loki.grafana.com + resources: + - kind: LokiStack + name: "" + version: v1 + specDescriptors: + - description: List of groups for alerting rules. + displayName: Groups + path: groups + - description: Interval defines the time interval between evaluation of the + given alerting rule. + displayName: Evaluation Interval + path: groups[0].interval + - description: Limit defines the number of alerts an alerting rule can produce. + 0 is no limit. 
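# For orientation, the AlertingRule descriptors in this section describe a custom
# resource along these lines; this is only a sketch, and the group/rule names, tenant
# and threshold values are illustrative:
#   apiVersion: loki.grafana.com/v1beta1
#   kind: AlertingRule
#   metadata:
#     name: example-alerting-rule
#   spec:
#     tenantID: application
#     groups:
#     - name: example-group
#       interval: 1m
#       limit: 2
#       rules:
#       - alert: HighLogVolume
#         expr: sum(rate({job="example"}[5m])) > 100
#         for: 10m
#         labels:
#           severity: warning
#         annotations:
#           summary: Log volume is unusually high.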
+ displayName: Limit of firing alerts + path: groups[0].limit + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Name of the alerting rule group. Must be unique within all alerting + rules. + displayName: Name + path: groups[0].name + - description: Rules defines a list of alerting rules + displayName: Rules + path: groups[0].rules + - description: The name of the alert. Must be a valid label value. + displayName: Name + path: groups[0].rules[0].alert + - description: Annotations to add to each alert. + displayName: Annotations + path: groups[0].rules[0].annotations + - description: The LogQL expression to evaluate. Every evaluation cycle this + is evaluated at the current time, and all resultant time series become pending/firing + alerts. + displayName: LogQL Expression + path: groups[0].rules[0].expr + - description: Alerts are considered firing once they have been returned for + this long. Alerts which have not yet fired for long enough are considered + pending. + displayName: Firing Threshold + path: groups[0].rules[0].for + - description: Labels to add to each alert. + displayName: Labels + path: groups[0].rules[0].labels + - description: TenantID of tenant where the alerting rules are evaluated in. + displayName: Tenant ID + path: tenantID + statusDescriptors: + - description: Conditions of the AlertingRule generation health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + - description: LokiStack is the Schema for the lokistacks API + displayName: LokiStack + kind: LokiStack + name: lokistacks.loki.grafana.com + resources: + - kind: ConfigMap + name: "" + version: v1 + - kind: Deployment + name: "" + version: v1 + - kind: Ingress + name: "" + version: v1 + - kind: PersistentVolumeClaims + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Service + name: "" + version: v1 + - kind: ServiceAccount + name: "" + version: v1 + - kind: ServiceMonitor + name: "" + version: v1 + - kind: StatefulSet + name: "" + version: v1 + specDescriptors: + - description: Limits defines the limits to be applied to log stream processing. + displayName: Rate Limiting + path: limits + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Global defines the limits applied globally across the cluster. + displayName: Global Limits + path: limits.global + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.global.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.global.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.global.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. 
+ displayName: Max Label Name Length + path: limits.global.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.global.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.global.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.global.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.global.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.global.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.global.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Timeout when querying ingesters or storage during the execution + of a query request. + displayName: Query Timeout + path: limits.global.queries.queryTimeout + - description: Tenants defines the limits applied per tenant. + displayName: Limits per Tenant + path: limits.tenants + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.tenants.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.tenants.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.tenants.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.tenants.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. 
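# As a rough sketch, the ingestion and query limit descriptors above correspond to a
# LokiStack spec fragment like the following (all numbers are placeholders, not
# recommendations):
#   spec:
#     limits:
#       global:
#         ingestion:
#           ingestionRate: 4              # MB per second
#           ingestionBurstSize: 6         # MB
#           maxGlobalStreamsPerTenant: 5000
#           maxLineSize: 256000           # bytes
#         queries:
#           maxChunksPerQuery: 2000000
#           maxEntriesLimitPerQuery: 5000
#           maxQuerySeries: 500
#     # limits.tenants accepts the same ingestion/queries fields, applied per tenant.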
+ displayName: Max Labels Names per Series + path: limits.tenants.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.tenants.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.tenants.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.tenants.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.tenants.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.tenants.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Timeout when querying ingesters or storage during the execution + of a query request. + displayName: Query Timeout + path: limits.tenants.queries.queryTimeout + - description: ManagementState defines if the CR should be managed by the operator + or not. Default is managed. + displayName: Management State + path: managementState + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Managed + - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged + - description: Proxy defines the spec for the object proxy to configure cluster + proxy information. + displayName: Cluster Proxy + path: proxy + - description: HTTPProxy configures the HTTP_PROXY/http_proxy env variable. + displayName: HTTPProxy + path: proxy.httpProxy + - description: HTTPSProxy configures the HTTPS_PROXY/https_proxy env variable. + displayName: HTTPSProxy + path: proxy.httpsProxy + - description: NoProxy configures the NO_PROXY/no_proxy env variable. + displayName: NoProxy + path: proxy.noProxy + - description: ReplicationFactor defines the policy for log stream replication. + displayName: Replication Factor + path: replicationFactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Rules defines the spec for the ruler component + displayName: Rules + path: rules + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Enabled defines a flag to enable/disable the ruler component + displayName: Enable + path: rules.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Namespaces to be selected for PrometheusRules discovery. If unspecified, + only the same namespace as the LokiStack object is in is used. + displayName: Namespace Selector + path: rules.namespaceSelector + - description: A selector to select which LokiRules to mount for loading alerting/recording + rules from. + displayName: Selector + path: rules.selector + - description: Size defines one of the support Loki deployment scale out sizes. 
+ displayName: LokiStack Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small + - urn:alm:descriptor:com.tectonic.ui:select:1x.small + - urn:alm:descriptor:com.tectonic.ui:select:1x.medium + - description: Storage defines the spec for the object storage endpoint to store + logs. + displayName: Object Storage + path: storage + - description: Version for writing and reading logs. + displayName: Version + path: storage.schemas[0].version + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:v11 + - urn:alm:descriptor:com.tectonic.ui:select:v12 + - description: Name of a secret in the namespace configured for object storage + secrets. + displayName: Object Storage Secret Name + path: storage.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Type of object storage that should be used + displayName: Object Storage Secret Type + path: storage.secret.type + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:azure + - urn:alm:descriptor:com.tectonic.ui:select:gcs + - urn:alm:descriptor:com.tectonic.ui:select:s3 + - urn:alm:descriptor:com.tectonic.ui:select:swift + - description: TLS configuration for reaching the object storage endpoint. + displayName: TLS Config + path: storage.tls + - description: Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the LokiStack custom resource. If + empty, it defaults to "service-ca.crt". + displayName: CA ConfigMap Key + path: storage.tls.caKey + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: storage.tls.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap + - description: Storage class name defines the storage class for ingester/querier + PVCs. + displayName: Storage Class Name + path: storageClassName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:StorageClass + - description: Template defines the resource/limits/tolerations/nodeselectors + per component + displayName: Node Placement + path: template + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Compactor defines the compaction component spec. + displayName: Compactor pods + path: template.compactor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.compactor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Distributor defines the distributor component spec. + displayName: Distributor pods + path: template.distributor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.distributor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Gateway defines the lokistack gateway component spec. + displayName: Gateway pods + path: template.gateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.gateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: IndexGateway defines the index gateway component spec. + displayName: Index Gateway pods + path: template.indexGateway + - description: Replicas defines the number of replica pods of the component. 
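# Taken together, the size and object storage descriptors above translate into a spec
# fragment roughly like this (secret, ConfigMap and storage class names are illustrative):
#   spec:
#     size: 1x.small
#     replicationFactor: 1
#     storageClassName: gp3
#     storage:
#       schemas:
#       - version: v12
#       secret:
#         name: lokistack-object-storage   # Secret with the s3/gcs/azure/swift credentials
#         type: s3
#       tls:
#         caName: object-storage-ca        # ConfigMap holding the CA certificate
#         caKey: service-ca.crt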
+ displayName: Replicas + path: template.indexGateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ingester defines the ingester component spec. + displayName: Ingester pods + path: template.ingester + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ingester.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Querier defines the querier component spec. + displayName: Querier pods + path: template.querier + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.querier.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: QueryFrontend defines the query frontend component spec. + displayName: Query Frontend pods + path: template.queryFrontend + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.queryFrontend.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ruler defines the ruler component spec. + displayName: Ruler pods + path: template.ruler + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ruler.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + displayName: Tenants Configuration + path: tenants + - description: Authentication defines the lokistack-gateway component authentication + configuration spec per tenant. + displayName: Authentication + path: tenants.authentication + - description: OIDC defines the spec for the OIDC tenant's authentication. + displayName: OIDC Configuration + path: tenants.authentication[0].oidc + - description: IssuerURL defines the URL for issuer. + displayName: Issuer URL + path: tenants.authentication[0].oidc.issuerURL + - description: RedirectURL defines the URL for redirect. + displayName: Redirect URL + path: tenants.authentication[0].oidc.redirectURL + - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath + for tenant's authentication. + displayName: Tenant Secret + path: tenants.authentication[0].oidc.secret + - description: Name of a secret in the namespace configured for tenant secrets. + displayName: Tenant Secret Name + path: tenants.authentication[0].oidc.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: TenantID defines the id of the tenant. + displayName: Tenant ID + path: tenants.authentication[0].tenantId + - description: TenantName defines the name of the tenant. + displayName: Tenant Name + path: tenants.authentication[0].tenantName + - description: Authorization defines the lokistack-gateway component authorization + configuration spec per tenant. + displayName: Authorization + path: tenants.authorization + - description: OPA defines the spec for the third-party endpoint for tenant's + authorization. + displayName: OPA Configuration + path: tenants.authorization.opa + - description: URL defines the third-party endpoint for authorization. + displayName: OpenPolicyAgent URL + path: tenants.authorization.opa.url + - description: RoleBindings defines configuration to bind a set of roles to + a set of subjects. 
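# One plausible combination of the tenancy descriptors above is dynamic mode with OIDC
# authentication and an external OPA endpoint for authorization; every URL and name
# below is illustrative only:
#   spec:
#     tenants:
#       mode: dynamic
#       authentication:
#       - tenantName: application
#         tenantId: application
#         oidc:
#           issuerURL: https://dex.example.com
#           redirectURL: https://lokistack-gateway.example.com/callback
#           secret:
#             name: application-oidc       # holds clientID, clientSecret, issuerCAPath
#       authorization:
#         opa:
#           url: https://opa.example.com/v1/data/lokistack/allow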
+ displayName: Static Role Bindings + path: tenants.authorization.roleBindings + - description: Roles defines a set of permissions to interact with a tenant. + displayName: Static Roles + path: tenants.authorization.roles + - description: Mode defines the mode in which lokistack-gateway component will + be configured. + displayName: Mode + path: tenants.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:static + - urn:alm:descriptor:com.tectonic.ui:select:dynamic + - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging + - urn:alm:descriptor:com.tectonic.ui:select:openshift-network + statusDescriptors: + - description: Distributor is a map to the per pod status of the distributor + deployment + displayName: Distributor + path: components.distributor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ingester is a map to the per pod status of the ingester statefulset + displayName: Ingester + path: components.ingester + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Querier is a map to the per pod status of the querier deployment + displayName: Querier + path: components.querier + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: QueryFrontend is a map to the per pod status of the query frontend + deployment + displayName: Query Frontend + path: components.queryFrontend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Compactor is a map to the pod status of the compactor pod. + displayName: Compactor + path: components.compactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Gateway is a map to the per pod status of the lokistack gateway + deployment. + displayName: Gateway + path: components.gateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: IndexGateway is a map to the per pod status of the index gateway + statefulset + displayName: IndexGateway + path: components.indexGateway + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Ruler is a map to the per pod status of the lokistack ruler statefulset. + displayName: Ruler + path: components.ruler + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:podStatuses + - description: Conditions of the Loki deployment health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1 + - description: LokiStack is the Schema for the lokistacks API + displayName: LokiStack + kind: LokiStack + name: lokistacks.loki.grafana.com + resources: + - kind: ConfigMap + name: "" + version: v1 + - kind: Deployment + name: "" + version: v1 + - kind: Ingress + name: "" + version: v1 + - kind: PersistentVolumeClaims + name: "" + version: v1 + - kind: Route + name: "" + version: v1 + - kind: Service + name: "" + version: v1 + - kind: ServiceAccount + name: "" + version: v1 + - kind: ServiceMonitor + name: "" + version: v1 + - kind: StatefulSet + name: "" + version: v1 + specDescriptors: + - description: Limits defines the per-tenant limits to be applied to log stream + processing and the per-tenant the config overrides. + displayName: Rate Limiting + path: limits + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Global defines the limits applied globally across the cluster. 
+ displayName: Global Limits + path: limits.global + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.global.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. + displayName: Ingestion Rate (in MB) + path: limits.global.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.global.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.global.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.global.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.global.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.global.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.global.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.global.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.global.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Tenants defines the limits and overrides applied per tenant. + displayName: Limits per Tenant + path: limits.tenants + - description: IngestionBurstSize defines the local rate-limited sample size + per distributor replica. It should be set to the set at least to the maximum + logs size expected in a single push request. + displayName: Ingestion Burst Size (in MB) + path: limits.tenants.ingestion.ingestionBurstSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: IngestionRate defines the sample size per second. Units MB. 
+ displayName: Ingestion Rate (in MB) + path: limits.tenants.ingestion.ingestionRate + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxGlobalStreamsPerTenant defines the maximum number of active + streams per tenant, across the cluster. + displayName: Max Global Streams per Tenant + path: limits.tenants.ingestion.maxGlobalStreamsPerTenant + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNameLength defines the maximum number of characters allowed + for label keys in log streams. + displayName: Max Label Name Length + path: limits.tenants.ingestion.maxLabelNameLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelNamesPerSeries defines the maximum number of label names + per series in each log stream. + displayName: Max Labels Names per Series + path: limits.tenants.ingestion.maxLabelNamesPerSeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLabelValueLength defines the maximum number of characters + allowed for label values in log streams. + displayName: Max Label Value Length + path: limits.tenants.ingestion.maxLabelValueLength + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxLineSize defines the maximum line size on ingestion path. + Units in Bytes. + displayName: Max Line Size + path: limits.tenants.ingestion.maxLineSize + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxChunksPerQuery defines the maximum number of chunks that can + be fetched by a single query. + displayName: Max Chunk per Query + path: limits.tenants.queries.maxChunksPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxEntriesLimitsPerQuery defines the maximum number of log entries + that will be returned for a query. + displayName: Max Entries Limit per Query + path: limits.tenants.queries.maxEntriesLimitPerQuery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: MaxQuerySeries defines the the maximum of unique series that + is returned by a metric query. + displayName: Max Query Series + path: limits.tenants.queries.maxQuerySeries + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: ManagementState defines if the CR should be managed by the operator + or not. Default is managed. + displayName: Management State + path: managementState + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Managed + - urn:alm:descriptor:com.tectonic.ui:select:Unmanaged + - description: ReplicationFactor defines the policy for log stream replication. + displayName: Replication Factor + path: replicationFactor + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Rules defines the spec for the ruler component + displayName: Rules + path: rules + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Enabled defines a flag to enable/disable the ruler component + displayName: Enable + path: rules.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Namespaces to be selected for PrometheusRules discovery. If unspecified, + only the same namespace as the LokiStack object is in is used. + displayName: Namespace Selector + path: rules.namespaceSelector + - description: A selector to select which LokiRules to mount for loading alerting/recording + rules from. 
+ displayName: Selector + path: rules.selector + - description: Size defines one of the support Loki deployment scale out sizes. + displayName: LokiStack Size + path: size + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:1x.extra-small + - urn:alm:descriptor:com.tectonic.ui:select:1x.small + - urn:alm:descriptor:com.tectonic.ui:select:1x.medium + - description: Storage defines the spec for the object storage endpoint to store + logs. + displayName: Object Storage + path: storage + - description: Version for writing and reading logs. + displayName: Version + path: storage.schemas[0].version + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:v11 + - urn:alm:descriptor:com.tectonic.ui:select:v12 + - description: Name of a secret in the namespace configured for object storage + secrets. + displayName: Object Storage Secret Name + path: storage.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Type of object storage that should be used + displayName: Object Storage Secret Type + path: storage.secret.type + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:azure + - urn:alm:descriptor:com.tectonic.ui:select:gcs + - urn:alm:descriptor:com.tectonic.ui:select:s3 + - urn:alm:descriptor:com.tectonic.ui:select:swift + - description: TLS configuration for reaching the object storage endpoint. + displayName: TLS Config + path: storage.tls + - description: CA is the name of a ConfigMap containing a CA certificate. It + needs to be in the same namespace as the LokiStack custom resource. + displayName: CA ConfigMap Name + path: storage.tls.caName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:ConfigMap + - description: Storage class name defines the storage class for ingester/querier + PVCs. + displayName: Storage Class Name + path: storageClassName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:StorageClass + - description: Template defines the resource/limits/tolerations/nodeselectors + per component + displayName: Node Placement + path: template + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Compactor defines the compaction component spec. + displayName: Compactor pods + path: template.compactor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.compactor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Distributor defines the distributor component spec. + displayName: Distributor pods + path: template.distributor + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.distributor.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Gateway defines the lokistack gateway component spec. + displayName: Gateway pods + path: template.gateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.gateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: IndexGateway defines the index gateway component spec. + displayName: Index Gateway pods + path: template.indexGateway + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.indexGateway.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ingester defines the ingester component spec. 
+ displayName: Ingester pods + path: template.ingester + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ingester.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Querier defines the querier component spec. + displayName: Querier pods + path: template.querier + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.querier.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: QueryFrontend defines the query frontend component spec. + displayName: Query Frontend pods + path: template.queryFrontend + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.queryFrontend.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Ruler defines the ruler component spec. + displayName: Ruler pods + path: template.ruler + - description: Replicas defines the number of replica pods of the component. + displayName: Replicas + path: template.ruler.replicas + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:hidden + - description: Tenants defines the per-tenant authentication and authorization + spec for the lokistack-gateway component. + displayName: Tenants Configuration + path: tenants + - description: Authentication defines the lokistack-gateway component authentication + configuration spec per tenant. + displayName: Authentication + path: tenants.authentication + - description: OIDC defines the spec for the OIDC tenant's authentication. + displayName: OIDC Configuration + path: tenants.authentication[0].oidc + - description: IssuerURL defines the URL for issuer. + displayName: Issuer URL + path: tenants.authentication[0].oidc.issuerURL + - description: RedirectURL defines the URL for redirect. + displayName: Redirect URL + path: tenants.authentication[0].oidc.redirectURL + - description: Secret defines the spec for the clientID, clientSecret and issuerCAPath + for tenant's authentication. + displayName: Tenant Secret + path: tenants.authentication[0].oidc.secret + - description: Name of a secret in the namespace configured for tenant secrets. + displayName: Tenant Secret Name + path: tenants.authentication[0].oidc.secret.name + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: TenantID defines the id of the tenant. + displayName: Tenant ID + path: tenants.authentication[0].tenantId + - description: TenantName defines the name of the tenant. + displayName: Tenant Name + path: tenants.authentication[0].tenantName + - description: Authorization defines the lokistack-gateway component authorization + configuration spec per tenant. + displayName: Authorization + path: tenants.authorization + - description: OPA defines the spec for the third-party endpoint for tenant's + authorization. + displayName: OPA Configuration + path: tenants.authorization.opa + - description: URL defines the third-party endpoint for authorization. + displayName: OpenPolicyAgent URL + path: tenants.authorization.opa.url + - description: RoleBindings defines configuration to bind a set of roles to + a set of subjects. + displayName: Static Role Bindings + path: tenants.authorization.roleBindings + - description: Roles defines a set of permissions to interact with a tenant. 
+ displayName: Static Roles
+ path: tenants.authorization.roles
+ - description: Mode defines the mode in which lokistack-gateway component will
+ be configured.
+ displayName: Mode
+ path: tenants.mode
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:select:static
+ - urn:alm:descriptor:com.tectonic.ui:select:dynamic
+ - urn:alm:descriptor:com.tectonic.ui:select:openshift-logging
+ statusDescriptors:
+ - description: Distributor is a map to the per pod status of the distributor
+ deployment
+ displayName: Distributor
+ path: components.distributor
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Ingester is a map to the per pod status of the ingester statefulset
+ displayName: Ingester
+ path: components.ingester
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Querier is a map to the per pod status of the querier deployment
+ displayName: Querier
+ path: components.querier
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: QueryFrontend is a map to the per pod status of the query frontend
+ deployment
+ displayName: Query Frontend
+ path: components.queryFrontend
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Compactor is a map to the pod status of the compactor pod.
+ displayName: Compactor
+ path: components.compactor
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Gateway is a map to the per pod status of the lokistack gateway
+ deployment.
+ displayName: Gateway
+ path: components.gateway
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: IndexGateway is a map to the per pod status of the index gateway
+ statefulset
+ displayName: IndexGateway
+ path: components.indexGateway
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Ruler is a map to the per pod status of the lokistack ruler statefulset.
+ displayName: Ruler
+ path: components.ruler
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:podStatuses
+ - description: Conditions of the Loki deployment health.
+ displayName: Conditions
+ path: conditions
+ x-descriptors:
+ - urn:alm:descriptor:io.kubernetes.conditions
+ version: v1beta1
+ - description: RecordingRule is the Schema for the recordingrules API
+ displayName: RecordingRule
+ kind: RecordingRule
+ name: recordingrules.loki.grafana.com
+ resources:
+ - kind: LokiStack
+ name: ""
+ version: v1
+ specDescriptors:
+ - description: List of groups for recording rules.
+ displayName: Groups
+ path: groups
+ - description: Interval defines the time interval between evaluation of the
+ given recording rule.
+ displayName: Evaluation Interval
+ path: groups[0].interval
+ - description: Limit defines the number of series a recording rule can produce.
+ 0 is no limit.
+ displayName: Limit of produced series
+ path: groups[0].limit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
+ - description: Name of the recording rule group. Must be unique within all recording
+ rules.
+ displayName: Name
+ path: groups[0].name
+ - description: Rules defines a list of recording rules
+ displayName: Rules
+ path: groups[0].rules
+ - description: The LogQL expression to evaluate. Every evaluation cycle this
+ is evaluated at the current time, and all resultant time series become pending/firing
+ alerts.
+ displayName: LogQL Expression
+ path: groups[0].rules[0].expr
+ - description: The name of the time series to output to.
Must be a valid metric + name. + displayName: Metric Name + path: groups[0].rules[0].record + - description: TenantID of tenant where the recording rules are evaluated in. + displayName: Tenant ID + path: tenantID + statusDescriptors: + - description: Conditions of the RecordingRule generation health. + displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + - description: RulerConfig is the Schema for the rulerconfigs API + displayName: RulerConfig + kind: RulerConfig + name: rulerconfigs.loki.grafana.com + resources: + - kind: LokiStack + name: "" + version: v1 + specDescriptors: + - description: Defines alert manager configuration to notify on firing alerts. + displayName: Alert Manager Configuration + path: alertmanager + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Client configuration for reaching the alertmanager endpoint. + displayName: TLS Config + path: alertmanager.client + - description: Basic authentication configuration for reaching the alertmanager + endpoints. + displayName: Basic Authentication + path: alertmanager.client.basicAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The subject's password for the basic authentication configuration. + displayName: Password + path: alertmanager.client.basicAuth.password + - description: The subject's username for the basic authentication configuration. + displayName: Username + path: alertmanager.client.basicAuth.username + - description: Header authentication configuration for reaching the alertmanager + endpoints. + displayName: Header Authentication + path: alertmanager.client.headerAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The credentials for the header authentication configuration. + displayName: Credentials + path: alertmanager.client.headerAuth.credentials + - description: The credentials file for the Header authentication configuration. + It is mutually exclusive with `credentials`. + displayName: Credentials File + path: alertmanager.client.headerAuth.credentialsFile + - description: The authentication type for the header authentication configuration. + displayName: Type + path: alertmanager.client.headerAuth.type + - description: TLS configuration for reaching the alertmanager endpoints. + displayName: TLS + path: alertmanager.client.tls + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The CA certificate file path for the TLS configuration. + displayName: CA Path + path: alertmanager.client.tls.caPath + - description: The client-side certificate file path for the TLS configuration. + displayName: Cert Path + path: alertmanager.client.tls.certPath + - description: The client-side key file path for the TLS configuration. + displayName: Key Path + path: alertmanager.client.tls.keyPath + - description: The server name to validate in the alertmanager server certificates. + displayName: Server Name + path: alertmanager.client.tls.serverName + - description: Defines the configuration for DNS-based discovery of AlertManager + hosts. + displayName: DNS Discovery + path: alertmanager.discovery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Use DNS SRV records to discover Alertmanager hosts. + displayName: Enable SRV + path: alertmanager.discovery.enableSRV + - description: How long to wait between refreshing DNS resolutions of Alertmanager + hosts. 
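# Mirroring the AlertingRule sketch earlier, a minimal RecordingRule for the recording
# rule descriptors above could look like this (names and the LogQL expression are
# illustrative):
#   apiVersion: loki.grafana.com/v1beta1
#   kind: RecordingRule
#   metadata:
#     name: example-recording-rule
#   spec:
#     tenantID: application
#     groups:
#     - name: example-group
#       interval: 1m
#       rules:
#       - record: example:loki_request_lines:rate5m
#         expr: sum(rate({job="example"}[5m]))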
+ displayName: Refresh Interval + path: alertmanager.discovery.refreshInterval + - description: If enabled, then requests to Alertmanager use the v2 API. + displayName: Enable AlertManager V2 API + path: alertmanager.enableV2 + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: List of AlertManager URLs to send notifications to. Each Alertmanager + URL is treated as a separate group in the configuration. Multiple Alertmanagers + in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery). + displayName: AlertManager Endpoints + path: alertmanager.endpoints + - description: Additional labels to add to all alerts. + displayName: Extra Alert Labels + path: alertmanager.externalLabels + - description: URL for alerts return path. + displayName: Alert External URL + path: alertmanager.externalUrl + - description: Defines the configuration for the notification queue to AlertManager + hosts. + displayName: Notification Queue + path: alertmanager.notificationQueue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Capacity of the queue for notifications to be sent to the Alertmanager. + displayName: Notification Queue Capacity + path: alertmanager.notificationQueue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum duration between alert and restored "for" state. This + is maintained only for alerts with configured "for" time greater than the + grace period. + displayName: Firing Grace Period + path: alertmanager.notificationQueue.forGracePeriod + - description: Max time to tolerate outage for restoring "for" state of alert. + displayName: Outage Tolerance + path: alertmanager.notificationQueue.forOutageTolerance + - description: Minimum amount of time to wait before resending an alert to Alertmanager. + displayName: Resend Delay + path: alertmanager.notificationQueue.resendDelay + - description: HTTP timeout duration when sending notifications to the Alertmanager. + displayName: Timeout + path: alertmanager.notificationQueue.timeout + - description: List of alert relabel configurations. + displayName: Alert Relabel Configuration + path: alertmanager.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: alertmanager.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values. + displayName: Modulus + path: alertmanager.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. + Default is '(.*)' + displayName: Regex + path: alertmanager.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: alertmanager.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. 
+ displayName: Separator + path: alertmanager.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: alertmanager.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: alertmanager.relabelConfigs[0].targetLabel + - description: Interval on how frequently to evaluate rules. + displayName: Evaluation Interval + path: evaluationInterval + - description: Overrides defines the config overrides to be applied per-tenant. + displayName: Rate Limiting + path: overrides + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Client configuration for reaching the alertmanager endpoint. + displayName: TLS Config + path: overrides.alertmanager.client + - description: Basic authentication configuration for reaching the alertmanager + endpoints. + displayName: Basic Authentication + path: overrides.alertmanager.client.basicAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The subject's password for the basic authentication configuration. + displayName: Password + path: overrides.alertmanager.client.basicAuth.password + - description: The subject's username for the basic authentication configuration. + displayName: Username + path: overrides.alertmanager.client.basicAuth.username + - description: Header authentication configuration for reaching the alertmanager + endpoints. + displayName: Header Authentication + path: overrides.alertmanager.client.headerAuth + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The credentials for the header authentication configuration. + displayName: Credentials + path: overrides.alertmanager.client.headerAuth.credentials + - description: The credentials file for the Header authentication configuration. + It is mutually exclusive with `credentials`. + displayName: Credentials File + path: overrides.alertmanager.client.headerAuth.credentialsFile + - description: The authentication type for the header authentication configuration. + displayName: Type + path: overrides.alertmanager.client.headerAuth.type + - description: TLS configuration for reaching the alertmanager endpoints. + displayName: TLS + path: overrides.alertmanager.client.tls + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: The CA certificate file path for the TLS configuration. + displayName: CA Path + path: overrides.alertmanager.client.tls.caPath + - description: The client-side certificate file path for the TLS configuration. + displayName: Cert Path + path: overrides.alertmanager.client.tls.certPath + - description: The client-side key file path for the TLS configuration. + displayName: Key Path + path: overrides.alertmanager.client.tls.keyPath + - description: The server name to validate in the alertmanager server certificates. + displayName: Server Name + path: overrides.alertmanager.client.tls.serverName + - description: Defines the configuration for DNS-based discovery of AlertManager + hosts. 
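# The alertmanager descriptors above come together in a RulerConfig along these lines;
# the apiVersion is assumed to match the v1beta1 rules CRDs above, and every endpoint
# and interval is a placeholder:
#   apiVersion: loki.grafana.com/v1beta1
#   kind: RulerConfig
#   metadata:
#     name: rulerconfig-sample
#   spec:
#     evaluationInterval: 1m
#     alertmanager:
#       enableV2: true
#       endpoints:
#       - https://_web._tcp.alertmanager-operated.monitoring.svc
#       discovery:
#         enableSRV: true
#         refreshInterval: 1m
#       notificationQueue:
#         capacity: 10000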
+ displayName: DNS Discovery + path: overrides.alertmanager.discovery + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Use DNS SRV records to discover Alertmanager hosts. + displayName: Enable SRV + path: overrides.alertmanager.discovery.enableSRV + - description: How long to wait between refreshing DNS resolutions of Alertmanager + hosts. + displayName: Refresh Interval + path: overrides.alertmanager.discovery.refreshInterval + - description: If enabled, then requests to Alertmanager use the v2 API. + displayName: Enable AlertManager V2 API + path: overrides.alertmanager.enableV2 + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: List of AlertManager URLs to send notifications to. Each Alertmanager + URL is treated as a separate group in the configuration. Multiple Alertmanagers + in HA per group can be supported by using DNS resolution (See EnableDNSDiscovery). + displayName: AlertManager Endpoints + path: overrides.alertmanager.endpoints + - description: Additional labels to add to all alerts. + displayName: Extra Alert Labels + path: overrides.alertmanager.externalLabels + - description: URL for alerts return path. + displayName: Alert External URL + path: overrides.alertmanager.externalUrl + - description: Defines the configuration for the notification queue to AlertManager + hosts. + displayName: Notification Queue + path: overrides.alertmanager.notificationQueue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Capacity of the queue for notifications to be sent to the Alertmanager. + displayName: Notification Queue Capacity + path: overrides.alertmanager.notificationQueue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum duration between alert and restored "for" state. This + is maintained only for alerts with configured "for" time greater than the + grace period. + displayName: Firing Grace Period + path: overrides.alertmanager.notificationQueue.forGracePeriod + - description: Max time to tolerate outage for restoring "for" state of alert. + displayName: Outage Tolerance + path: overrides.alertmanager.notificationQueue.forOutageTolerance + - description: Minimum amount of time to wait before resending an alert to Alertmanager. + displayName: Resend Delay + path: overrides.alertmanager.notificationQueue.resendDelay + - description: HTTP timeout duration when sending notifications to the Alertmanager. + displayName: Timeout + path: overrides.alertmanager.notificationQueue.timeout + - description: List of alert relabel configurations. + displayName: Alert Relabel Configuration + path: overrides.alertmanager.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: overrides.alertmanager.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values. + displayName: Modulus + path: overrides.alertmanager.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. 
+        Default is '(.*)' + displayName: Regex + path: overrides.alertmanager.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: overrides.alertmanager.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. + displayName: Separator + path: overrides.alertmanager.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: overrides.alertmanager.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: overrides.alertmanager.relabelConfigs[0].targetLabel + - description: Interval on how frequently to poll for new rule definitions. + displayName: Poll Interval + path: pollInterval + - description: Defines a remote write endpoint to write recording rule metrics. + displayName: Remote Write Configuration + path: remoteWrite + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Defines the configuration for remote write client. + displayName: Client + path: remoteWrite.client + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Type of authorization to use to access the remote write endpoint + displayName: Authorization Type + path: remoteWrite.client.authorization + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:basic + - urn:alm:descriptor:com.tectonic.ui:select:header + - description: Name of a secret in the namespace configured for authorization + secrets. + displayName: Authorization Secret Name + path: remoteWrite.client.authorizationSecretName + x-descriptors: + - urn:alm:descriptor:io.kubernetes:Secret + - description: Configure whether HTTP requests follow HTTP 3xx redirects. + displayName: Follow HTTP Redirects + path: remoteWrite.client.followRedirects + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Name of the remote write config, which if specified must be unique + among remote write configs. + displayName: Name + path: remoteWrite.client.name + - description: Optional proxy URL. + displayName: HTTP Proxy URL + path: remoteWrite.client.proxyUrl + - description: List of remote write relabel configurations. + displayName: Metric Relabel Configuration + path: remoteWrite.client.relabelConfigs + - description: Action to perform based on regex matching. Default is 'replace' + displayName: Action + path: remoteWrite.client.relabelConfigs[0].action + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:drop + - urn:alm:descriptor:com.tectonic.ui:select:hashmod + - urn:alm:descriptor:com.tectonic.ui:select:keep + - urn:alm:descriptor:com.tectonic.ui:select:labeldrop + - urn:alm:descriptor:com.tectonic.ui:select:labelkeep + - urn:alm:descriptor:com.tectonic.ui:select:labelmap + - urn:alm:descriptor:com.tectonic.ui:select:replace + - description: Modulus to take of the hash of the source label values.
+ displayName: Modulus + path: remoteWrite.client.relabelConfigs[0].modulus + - description: Regular expression against which the extracted value is matched. + Default is '(.*)' + displayName: Regex + path: remoteWrite.client.relabelConfigs[0].regex + - description: Replacement value against which a regex replace is performed + if the regular expression matches. Regex capture groups are available. Default + is '$1' + displayName: Replacement + path: remoteWrite.client.relabelConfigs[0].replacement + - description: Separator placed between concatenated source label values. default + is ';'. + displayName: Separator + path: remoteWrite.client.relabelConfigs[0].separator + - description: The source labels select values from existing labels. Their content + is concatenated using the configured separator and matched against the configured + regular expression for the replace, keep, and drop actions. + displayName: Source Labels + path: remoteWrite.client.relabelConfigs[0].sourceLabels + - description: Label to which the resulting value is written in a replace action. + It is mandatory for replace actions. Regex capture groups are available. + displayName: Target Label + path: remoteWrite.client.relabelConfigs[0].targetLabel + - description: Timeout for requests to the remote write endpoint. + displayName: Remote Write Timeout + path: remoteWrite.client.timeout + - description: The URL of the endpoint to send samples to. + displayName: Endpoint + path: remoteWrite.client.url + - description: Enable remote-write functionality. + displayName: Enabled + path: remoteWrite.enabled + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:booleanSwitch + - description: Defines the configuration for remote write client queue. + displayName: Client Queue + path: remoteWrite.queue + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:advanced + - description: Maximum time a sample will wait in buffer. + displayName: Batch Send Deadline + path: remoteWrite.queue.batchSendDeadline + - description: Number of samples to buffer per shard before we block reading + of more + displayName: Queue Capacity + path: remoteWrite.queue.capacity + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Maximum retry delay. + displayName: Max BackOff Period + path: remoteWrite.queue.maxBackOffPeriod + - description: Maximum number of samples per send. + displayName: Maximum Shards per Send + path: remoteWrite.queue.maxSamplesPerSend + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Maximum number of shards, i.e. amount of concurrency. + displayName: Maximum Shards + path: remoteWrite.queue.maxShards + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Initial retry delay. Gets doubled for every retry. + displayName: Min BackOff Period + path: remoteWrite.queue.minBackOffPeriod + - description: Minimum number of shards, i.e. amount of concurrency. + displayName: Minimum Shards + path: remoteWrite.queue.minShards + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:number + - description: Minimum period to wait between refreshing remote-write reconfigurations. + displayName: Min Refresh Period + path: remoteWrite.refreshPeriod + statusDescriptors: + - description: Conditions of the RulerConfig health. 
+        displayName: Conditions + path: conditions + x-descriptors: + - urn:alm:descriptor:io.kubernetes.conditions + version: v1beta1 + description: |- + The Community Loki Operator provides Kubernetes native deployment and management of Loki and related logging components. + The purpose of this project is to simplify and automate the configuration of a Loki based logging stack for Kubernetes clusters. + + ### Operator features + + The Loki operator includes, but is not limited to, the following features: + + * Kubernetes Custom Resources: Use Kubernetes custom resources to deploy and manage Loki, Alerting rules, Recording rules, and related components. + * Simplified Deployment Configuration: Configure the fundamentals of Loki like tenants, limits, replication factor and storage from a native Kubernetes resource. + + ### Before you start + + 1. Ensure that [cert-manager](https://operatorhub.io/operator/cert-manager) is installed first. + 2. Ensure that the appropriate object storage solution that will be used by Loki is available and configured. + displayName: Community Loki Operator + icon: + - base64data: <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   version="1.1"
   id="svg48"
   width="500"
   height="500"
   viewBox="0 0 500 500"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <defs
     id="defs52" />
  <g
     id="g54">
    <image
       width="500"
       height="500"
       preserveAspectRatio="none"
       xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfQAAAH0CAYAAADL1t+KAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJ
bWFnZVJlYWR5ccllPAAATZFJREFUeNrsvVmXJFd5r5/h5Zseqiu/QUU3g4az7Cqw4epAhzi2bwy2
7AUcw8EgbDFPEkICgaAFCCSQrAEEEkhGAmyMDQZh4ZvDoCjwjZmUCeuo1S2plf0Naq7LOhE1dOUQ
w9773W8Mmc+zVqLz/7s6MiIjcz/7jfjFu4MOTDUrb/wfYfKf7yevr3e/9f/u5RMBAABot9iX+BQA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMCAgI+gGaxee2X6nHj6Wtz/7/L8w0/dyicDAAAm
/D4fQSPkHTHNAgAAhD6d8qazGwAAGEMtqCvyJ4Wf9sn5h54a8GkCAEAZv8dHoMP8w0/1dgU+/LKH
Kh0AABB6A+gJ/z1CBwAAhN5qoe9V9af5CAEAwARCcbr0jeVNhQ4AAFToLarQg4xXPt3Vd1wZ8jEC
AABCr5H5h56KrYNxk39PlQ4AAAi9kVW6XbWO0AEAAKE3Ruh2l9qHIRgHAAClEIrTJjAMxuVDhQ4A
AFTojanQ3SEYBwAACL1u5r/yVOxY2Q+/qNIBAAChN75KL7+/jtABAAChN0robuE4gnEAAFAIobgq
IBgHAABU6FNWobvRXX0nwTgAAEDotTL/oCQYt3PwokoHAACE3vgqfVTee6/OzvBfIHQAAEDojRJ6
ubyzIBgHAAC5EIqrimCHYBwAAFChT1WF7kZ39V1XhHyMAACA0Gtk/oGzsXt136FjHAAAFMIl9+qr
9KVSeeeT/tvHfOzI6rVXRvvbW0xe/fmHn7qX0wMAgNDBReiB9b93CsaNyXspYx/SqwcIHQAAoYMR
th3jHHq6G8g7i4iTAwDQdsVAZay++4pUnE8Iz8TJ+S+dHYxJPEz+831h9f+S+Yee6nGWAADaCaG4
Cpn/8n4wzm2Bltwqff7hpwa7gTm37RlX/wAAgNDhsHKWVsF54o2F213k5AAAIHQwRyr00962O3qV
gAodAKDFEIqrHvuOcYFRhd632EYWEacGAIAKHXxV6OX317ur783sGNez2Ebm+66+40qqdAAAhA4m
zH9pqGOcezhuMhiXJtQt5J3zvggdAAChgzFpME72wKB5MM5u0kAwDgAAoYMFOsG4g4mC++NrVOgA
AC2FUFw91BOMKyfi1AAAUKGDrwrdJBj3vpJgnO1kYf+1+k6CcQAACB2MmL9fKRj3FYPWreXvh9AB
ABA6WFTF+sE4t8kCwTgAAIQOFugH4/xOFAAAoMEQiqsPtwBb+TPjBOMAAKjQoVEVevEl8+7q+z0E
43a3uzPyWn3XFVTpAAAIHUyY/+JQMK5c3nkyngzGPVgQjMuQd6ezk/WXCB0AAKGDRXUsbdeaH4wz
l3cWBOMAABA6WNCzkHcWOcG4nZ6FvKnQAQCmAEJx9dJXenRNFowLCMYBAFChg404pY+udVc/IAzG
5VwNWH03wTgAAIQORszfNxaMc5sUTAbjHjgrvZSfuV0AAEDokI+0ml7K+dvYKjU/+bcE4wAAEDqI
hS4NxhVNFMy2TYUOANAiCMXlsHrt7qpjS/uVavrfKHldNf/wU7Hnt9INxrlvO+JbAACA0KdB3p0M
Iab///0K3Ucw7rorwvl7zw48b7ez+p4rlua/dLbHNwQAAKG3Wd5ZeL+vnIg4ToQs3Ux6HCNCT0Wc
CFky0TjYLkIHAEDojZP5rcl/zhjKO0+cGvSstp29hvljGX8Zd0wunQfVTWAAAECH2QrFDSe/3QhX
33ZlV0no+bItD7CZB+PsesYTjAMAQOiNRHb5eE+AGpLrO8i2XLzBfuDOfnsHRPxEAAAQeuOYf+ip
lc7YvWaHyti/5A4WaXGnu3q9h6VUM1h9Lx3jAAAQeluqdLvK2H8w7h4PHeMyqvT5+x0T6qOfBUIH
AEDoDcT2MnRQmeB6lsdhul+x45UItQkMAAAgdB/EArmlhKvvaGkwzu0ePRU6AABCbyQ9gdw0Jecc
jAuC9LVTHoxzI+JnAgCA0BvH/FefWknkNhBuxr/kDINx+/IeeXU66avTXbvh8tCq8jdk9X3TG4xb
vfbKKHldl7weSV5Ppv9vhgUAaCOz2vo1lVzoIN0D/Afj7j4br37wigl578va5srByGRl/otne4mQ
XY9zeLut7xiXyrsz2iVwKeOY6YwHAAi9RaSXt6+2lNq44PwX6cFOT7ht+45xZpfiWxeMM5R3ZecW
AACh65AK7oyl1IYJV995ZXf+wd3n2n1fOZAIpSgYF1kfZ9A+ye2L/AnBuUXoANBKZnU99J6ge5rm
wN+XlfglHePK5J0fxotac2bTfITw3K6+7cqIoQEAEHoLmP+KRce4fHFoDPo9i/fPEnB37UMGwTiH
JP3qB9oRjJt/6Kn0vK44nE+a6QAAQm9tlW412O+Mvjo73u8rn/iHp2OH6rn0ysH8F8429YpEPeeW
ZjoAgNCnir6FvKsTXDB2O8DfPeBYuGdtktyy02SICh0AEHoriS3knUW4+q4rqu0YZ8ZpL9utruWt
7qTIrUpH6ACA0Nsz6KePiO1It9KeYFzRds2q2KhFZ1eeGXgHwTgAQOitYP6Bs+7BuEMxaAz68mDc
jQXBOEHL29XrWhKM++pTadJ9ZYYyAwAAM33J3UyexQL0H4y7SykYd99Z6ZrrbZOc/a2L0c+XYBwA
IPQW0XeWpvZSqk0LxrVPcsuCiRoVOgAg9FYRHATjOq4BqnD1PQrBuKDmYFz+8bazQmfZWABA6NPB
6rVXLiWvrpHg7Af/5gXjOhbBOLvjjRp2Xrv7q6V1MydFsnvondV3EowDgPYwdb3cU3l3RhflOBiU
r09e9w7/7fyXz64kFfag47Ly2qjkYrXq0rSiHqW7dtPl4YnPPz2Y2K7wPvrq9Vcszd9ztlfDee0O
nc+DcxvuH/9bk/99dOTcPvjUIBFyGny0u4IS7IxPjGKGCQBA6PXJ26b7l9tSquXbdebEnU/Hazde
birvoip9ROjz957tJUKW7p76Uqol8rY5B73CqwrBTuXnFgBg5oWeDPLh2AAfWUqu6DL01YJqWDMY
Z7/t0fvddkupmm13UencXmMob5tzu3zpWAOnngPcRweA1tCme+jp4PpIMshft/v8t30C3D75bXZ/
OVx9b03BuOL9cw/GVd9FLT2fZ5LX1ckrdLj3HeVU4L3DDoDO3zkAAITuXXDS+7/Zy2KKG650qgjG
+QvribYbBDuRyrmV/fvO6tuvXHKavJRMaFbffUXEMAEACN0jWstizn/p7MruGtrSClNrAuOe1O6u
fTijY5zFdhN5T7xS1m64fMnzuZV2x8ucwMw/cLb8O8Pz6ACA0GtBa1lMaSXnPxj3uZyOcXZMSu7u
7IR6nrwrvCIRO5zP4b8rP7d226NjHAAgdEXsl8U0WzGs/Lnvelbncr8UXXC8iaxjC3lnsahyrPbn
s7zpTZB8Z9y2R4UOAAhdjbJmIe6Dc+xUGR4Srr6vxqVU8/fXz1KqVVTogVrTGzrGAQBCbyDSwTlv
WUzp/WqtgV8qOfcrEsUTh6i2Cr343C5ZTwJNtvsegnEAgNC9MrIspsfqcv5+wVKq5RWinysSblLq
rn2kYClVt6p/l7UPeQ7GffUp6e2FvNBjel69hykBABC6zyrdbYD2E4ybFJ7/YNwdOsG4E//wdE9Q
9WtekYgdPvdh3M6te5gSAAChC1gWDvr2l6HNhNeqYNzISnNuLKofq79n75cF29Q8twAAVOgKg35s
vb3RvwtXP0AwTkQajJNlGaKc7fYamI8AAJhxocsbrmQvi1nWia6+Kr0ZwbjJ94waczXioL1r8lp9
1xV+OsaNHe/q+wjGAQBC90q6LGbHJeR0aZDeHfgnw1NfGArGubeB9T/o+wjG3WwZjDM8/rUbPQfj
HjQIxg3JO6dPe1bosfg7U+8tFQCAGa3QTSuuIGPw71wa/BcL5emO/2Dc7UrBuLuebmoP+9hC3jbn
QHq8BOMAAKErsGwhbxsR9Z33SPfxJr1gnIzT/q9IKK2QdtAxzuOkCAAAofsc9MvlbTM4mwkuv8oL
V6+boWBcUFFmwO64I2/HyrPoAIDQ1ZEvt5m9LGbPQpJVVnJawbhlm88s4/2WKj+3BsednNslle2+
n2AcACB0rxgti1k+UE+Gp+7bX0pV49Ep2QTELRh3+PfdtY9ZBOMsJgtrN10eeT630tXRsjvGfXHo
O+NxuwAACF2jkrN7htzvUqqH+A/GfdYgGOewMM2JO58+bKVrOlmoQnIHTW/cJxv2oUfJdgEAELpo
0F92bABTdrlYeg9XPxjnt9uZ9PE1vY5x7on04lsM/rcLAIDQvVbodoO1WzCufPvh6vUKwTh5t7PT
rZHcQcc4dyLrCh2hAwBCr1Hosmeo85bFFC/R2uk0JP1tsk/NlJx0oZzO6nuVOsZdRzAOABC6V+a/
7Lgs5uhgPRmeuncoGOe7QqxScpNC6q7d4riUasn21z7iORj3pbPiLnad7NBj8XeGjnEAgNBrrNIF
VVynoxCM01pK9TNKwbjPPW0+MbJ4YkDM8Gpwbldg6BgHAAi9RSz7ruL2Mbu8XaXgxica2sE4u+3r
L6VqP6miYxwAIPRWVeg6vchjieCCoBOu3XB5A4JxO+Ov04WSc3/+vr7MQP55ibxNFHgWHQAQujKB
+JnxNDwVGU0U8uWdvHZGXvutaCuW3E7GS1Ch1y90cde+1fcpdYy7nmAcACB0r5Qui2k2YE+Gp+4Z
WkrVTN5ZaAz6PQt5Zx13d+3jl4XehD70Oa7d7DkYd/9Z6bKxmRON+XsdOsZVM4EBAJjhCr1MRrLG
KD0Ledts15kTt6XBOGN55x33ZDDuDoNgnEPgzgOxcKLRmmVyAQAQulJjlETgOs99+57A+Fs8Rhq4
qycYZzl5GfnOOFyJ4D46ACB0LfQao0irw3DtQ8rBOLfjLg/GNWcC03eavEiWUuVZdABA6LUhvv+b
syymtDqsRnL+xNu8YJzJ5KXkHKx+oCAYZ/Xkwujtl7Ub/GYGAABmXugjy2J6FO+Jf3h6NBjndhla
Y9CXPp/dXfuEp2Dc2Oex9lHPwbgvnJXeXsg8t/P3nN1bZc5Q3nv5Cap0AEDo1VbpbmtdL5ZWiG74
D8Z9+lzsItvSYNztJcG4ui5FD3eM83sOehbyruTcAgAg9LL7v+6XxvvC/WpnMM52e2UTo6qvSBgE
2BJxL1vImwodABB6Yyt0s8E5Nt6D7PcN125sQDBuct+Kg3EeK38P9AX7kxJ5mSiQdAcAhF5JhS69
NJ6GpyLjQd9u0lCt5GSXxrUq/zona+mSp3Yd4wzfZ+1DBOMAAKF7pXRZTLPKa/K+8l1PHy6l6v6o
mMagL10xrLt2JiMYJ38krrP2Mc/BuPvOiidrnezQ4953RmctAAAAhO6lknOrvPwvpVq8XWdOfOpc
LBRctuQ+Y7GUarWSix0naeWhx4adWwAAhD7e/cvf5WL7YFw191q1nhuX3luuNhgnu8VAxzgAQOiN
Q3652C0YVy6UcO0mhWCcPP192lpy9XVR6ztO0g6IKp4oAAAg9Noq1iB3WUy3UJZ+JSdNf/t9dG2X
3QVs9DrGCVj9oJ+OceOs3UgwDgAQuldGlsW0q1SHB/DJ+8p3DgXj3OWpMehL09/dtVsLgnFm8u5k
LeG6dstlXo93/p6z0tsA2ef2rqcLO8YZvhdVOgAg9EqqdDvZlQfjGrIS2YlPnos9pL8nxHvitqxg
nMX663pXJGLHydTenwU7fkKPk+91mmEEABC6b4Y7xrnJbilnu/0GrkSWLzlzciS307OQd9bnrROM
M5d3VjtX+2Ac99EBAKE3qEK3mxD4XUr1kHDtI40MxplLru5mOkF2ZsCiF3tUeG7d1gDYWyb3wyrn
FgAAoTuI7dIAvvpBx6VUy7fdrraoGlc6hOdWsJDKLms3XL6U8Rn1BGsAUKUDAELXYHdZzKxgnN26
15Phqc+NLaXqtu1IdQLjmNRe+9Rl2ZITsvZxz8G4u8+Kn2LI7Ab4+f1zK+sYFzGUAABC901gXl3m
VHyLxlWr3aDvPxh36zm7tqim6e9Pn8ueGNm9R30d42zv7QfiWzV0jAMAhK7AsoW8s8gPxsmoNhhn
PvlwT3+7tdL1c0XC7cqEvBtgdbdTAABmXujSe61+g3HD4amblYJxOguMyFrpBkrBOJ2FcmJjgWej
FXoEAJhdoSfylt//vSGz+5e0YtWp5ORXDqKc7fYs5F1V1doTfE67r7UbL1fpGEeVDgAI3TOXlsV0
q6Lzw1N3jAXj3Ab/qFGS22f905eZrxducdxrZ/wG407c9XTP8XwWinc39BjkhB7Nv0MRwwkAIPQq
JWcmpMXSqtUN/8G4T5yzEnoQTL4yJfepc3ttUZv3+FrsOsEoOQfSRx4JxgEAQldgWVBFF4lIdnk7
qDYYlyNvHclVNIFRXFWvL7wiwSV3AEDoaoO+O/JgXLYIwrWP6gTjLORtc7zLjp9/favMle9TlPN/
j4X30LVCjwAAM12hS1ui5i2LKb6vrFHJBVrBOHkQsJ5gXMn5WLvp8iUv35nJ96VKBwCE7pPdZTGL
gnGOi26cuH1oKVX3BHjUiAnMGOu3GQTjHCrYtVs9B+PufNptn8pCj7cXdAM0/w5FDCkAgNC1JOd+
GdX+vrIkcCdg7uPndNYL/+RYMM4NjUf1YssFVGTn1nzSQDAOABC6wqC/rHQfvS+cKExfxzi3iZGf
KxJu56F8mVz7Vdc0zy0AABW6w0SgONB1UB26E659rMUd4+yvAOh2jHMjUpoUaZ1bAACEbiGerPBU
5LVi1ZacjMjpc6yjO56P0ONHcoJxskkRVToAIHTfnLhzLBhnM1AXhac+YxieKn6/qDFXJIYoDcbZ
93Pfk+enPAfjPve0dJKRKd4Tn7UMxmW/d8SwAgAI3X/V2rOQt82KYcL0947/YNwt58RVa+YE5tah
YJy7RPU6xlUZejQ7ZoJxAIDQFZCtGGYaniqWd8ar4mCc+bErTWAUO8a5/3uljnE7XHIHAISuWqH7
bRMaW8g7i3Dt45fpBONcq9biIOCy0ucoQbqGeZTzt5Yd4ybOd7h2y2UE4wAAoasIzm0ycBCeirK3
ayzv6h5xOrhy4Ca4fMnJ789XF4yzWQ3uZtuOcQbnXK/lLQDA7Ar9xOcMl1K1XW7ztoLwlLlQokok
Z1mtr3/2Mi9tUcf7yq9/2nMw7o6nVRLph+fWWN42EyMAAITuTXI+l9uUCcV/MO5j53pu+zMirknJ
feJc4cTIcGEY/aVU7a/A5JyDHenEiGAcACB072jd/5U/9119MG5S3lmVZ2EwTrCqm27HuHJ5Z8nY
vGNcM84tAAAVumBCIF9KNVsu4donFINxZvI2Pt5E3MuBTitdCX3nSrp4oZxY+J3ROrcAAAhdUsmt
3ezYMa6WLmo7fQt526wYpjUxkp1b977re+f2Y8KlVG2rfwAAhO7GiTtygnHCNcxPfPrcaDDOrYta
VNkExmL/1m9/sYrk1m/zHIy7/WlxCDBrAjNxbv1W/wAACF1QHfaE90QXS7frVsX5D8Z99LxK+nvu
4+dUJkYezm0szEiYhx5rPrcAAAjdZcWw0UHbvKtY3YLbIxb++/IJjM+JUR3ntnxVvaaeWwCAmRa6
9HLxklF1aC+4cO2MdjDO6wRmWbhf9ay85tJnvrnnFgAAoUsqrrWPeV5KVVdyOm1R5Z339IXu0LZ1
7ZbL/J9bOsYBAEL3z4nb94Nx9sunFlbpJz55zn65zcltR02cwKzfIQjGFbzP+mc8B+M+8/ThanBW
Pdd3CidVVueWjnEAgNArJBBLzn65TdkKZ87M3awUjLslIxhnn+6v4LK7ddtW83NLxzgAQOi1syyU
XHEwzn3b7Q7G2VeuCpLbWXbsuV58DugYBwAIvUUVunRwPghPuROu3drAYFzHMhhnPqHRrdDdJlZ+
uwEe7ku49kmCcQCA0OsX3JgY1m7xHIwre3RKRl9yrAX39h0u5xcv/uJlsub+HPouax8XBuPoGAcA
CL0adsNTRUupmklqMjx1q2F4qnj7UaUTGEMhr38uIxhXeqWj/P71+mdf7PV4T3y6eDU4w8/A/NzS
MQ4AEHpDqnSHR5v2X2bhKfug2GnfBzr3kfM9p30puXIw99FheVou/qJ7RUInnBjQMQ4AEHrzOFhK
1b26XCrcrntQbEnpeGPhlYOC9cJ3FLYrYtmhejaZZNAxDgAQemMrdJvq0mzQlz4S11371GWhetVq
X62bBeP8bVcyeem5rLZmMFmTd4z7FME4AEDonjmoLI3knTl4Z4WnTpw5F2s89+1Bcn3hwiVRoTzd
t6t3yV1wHtbO+O8Yl64hH9AxDgAQul9O3DZ0/9f/c+Naj4n5q9AdWP+8cCnVvO3e7jkY96lzex3j
ZBOgyWDcJ8w7xu3Le+RVODECAEDookG7J3xu3L5jnJkA/Afjbjqv8kjd3M3nDydG9turJxhXvj9W
57ZA3jbfGQAAhC5AZ7nNkue+DQRQX8c4twBb41redpSa3iTnqm8hb+PtAgAgdO0qzmW5zaHtWlZv
B3TXP93CYFyzWt72lNr7xs57tLcP4fptBOMAAKE3R3AHHeMywlMnPnEudqzedCWnHYxr1rGKlzxd
+6QgGOc2EQQAQOgu7HYVM1luU1ilO1ZyzQzG3akUjPuc52DcrRYd4yxatc59PCMYZ38lIGK4AQCE
3qQqfQ9ZMC7//fwH4248L65aM4NxH7EMxlXXUMd9ydOipVTlj+oRjAMAhO4dm85u2bStq1gsnGjY
TWDka8z7O7f2zWacQo8ukyIAAISuUcX5GJxN+37n012/rYJgnK+JRuCwxnwdj675mVRJJ0Xh+mcI
xgEAQq9H6AUiWLt1Mjw1d8u52MO+6Qfj7CcakdPnWC5S3aVUHa/ArH3KMBhn3zeeKh0AELpPTnwy
Izzl73Euu+q/aQ1Xcli/qyQY5/i42PrnPQfjPuGwlOrBIQT5rVqTyVoapBwILuUXTYwAABC6t0rO
Hq2GK/6DcR9SCsZ9+PzhEwNNuiJhMIEp6RWgsExu4fK7AAAIXcCy6F+XdYxzv7c8HcE4889RvWOc
Q6OfvMxA30LenYyV/LjkDgAIvY4qrkRyxcE4N2mmdNc/26JgnM3EqKJH1xJh9wRtWov2KbaQd9ax
h+u3v5hgHAAg9EqEbiG5rPDU3MfGgnHW95Z3paBRydkH40b/PlL6HJuRGRjb1/Xb8oJxxvKmYxwA
IPQq2O0qNtwxzq1zXPnja+Xy7lRyaTYQJ9I763fnBOOEa8Gv3+k3GDf38ZJgnNn+TmYGPkrHOABA
6NNapVvcVza8NLv3/v6DcTecFyfSMyV3k8NSqlVW6e7HSsc4AEDorcGlMYqRiHb6FvKuqiVquu24
KWvBjx27Xsc4/5MMOsYBAEJvTYUuH/SllX93/fYXhy063vJgXPX3lbX6AcSO2zsgXL+DYBwAIPRa
B/2sx5/WP50RjPvo+Vh6X1lJcm7VpW0wzl+THp1za7h/65/13DFO93gBAKHPLkVdxSyeXfa/lGqd
VWuJnNbvyQjGye8rp53oIp8HOnfL/rm1b89aeOtj7ubzZh3jiokYegAAoStIzqHxyDD+7iuPCsV/
MO6D56WVdLbkPtTQYFwgvfWx057MAAAg9Fn/AIJA2DHONTxVx8Ile+8bSyppxQmMTsc4+/asQy9B
MI5n0QEAoVdfoQtFVH7J3a0S7q7fMUXBuLpWXjOW907WvzcLxtmc34OOcZ8nGAcACL1awZk0XMno
KjZ383lpJaxVyQmCcTvpK/I+gVFfZc5I3pn7vH575r39nqAZEVU6ACB0DUa6irkn08sl53YVoMZg
3M7oa0iC6/e+qDgY5zhBWr/bczDuo+m53VlxmaQViXfuI+fpGAcACL2R1N1wJV8G/oNx12cF4/Ll
bSy5GzKCcU14nCvIaE3r49zSMQ4AEHojsb8MbXYfve+7OvQjuZ3YQt5tl5x76NH23Np9b7jkDgAI
XYHYsYI+IFz/zGVdcXU4+T7d9c9PWce4qit0+VUS945xxd+bcP1OgnEAgND1Bn37xHJuxTX34fOx
YJKgKTlpP/LI20TB7EqHX6Fb3gpYvyMnGCfYpuK5BQCEPrvM3XLusPuXu4DzJaeyAEytVWtn/b4X
LVlv1+SJgXs8B+NuPj+5TK6HKwdzN411jHMj4tcHAAi9KVX64aNQ+feVZfgPxl33jEo/8rkPDgXj
PC7RWtkEJv8zoWMcACD01hB0+vadxHZMRNTM8NRwx7jAan+KZXSQG3CnnmCc2zPjdIwDAITeQGIL
eWcN2uH67ZkhJ+mCKN31OysOxplV1zrBuGBHv0K3X7DFf8e4PcL1uwjGAQBCVxj0jeVtXHHN3XRe
OuhrVXJ9wf6kRNYThczPcvwZ+AqeRXeYxKx/XtAxrvpzCwAIfXaZ++g5ve5fpl3Uqhz05ZfGO+tf
sAjGWTSwWb/3RZHXc/vhsaY3vlaZuzGjY5zVZKZT9MQAAABCV63kivEbnjrcL//BuPc/0/OwmUnJ
Xb8vT/vuc9VMYOpYZa54AkEwDgAQugLS57PlwbjswV/rsmwsqCwLgnE70slCMzrGuXQD9Hd/HgAA
oasLLn/QDtfvsAjGmV/67a7fVUPHOPclT7XarTbvWE2eFijrGHc3wTgAQOj6g76P57NvPB8Lm8to
Vel9wXGmfxI5ybP8PXWF7ngu1u+kYxwAIPRWMHfzWPcvt/utMslVOehb3FfO89TGFw2DcbbtVr/g
ORh301DHOI/nYO4GOsYBAEJvT5VuJ0l/wbhRAfoPxr0vOxhn6d5JyV33jF271epyAz3B50/HOABA
6C1DK8DWdyqBNSv0vbeJra8QSyRnPluoNhgna6ZDxzgAQOgNJBaKKFz/fEkwzvY+68FSqne3KBgX
JPJs6qI0/vvMx5JLHLvfmXsIxgEAQvc/6LsvoZpbpc99aCgY16xKThSM67QxMyBg/R/oGAcACL0V
zH2koPuXufDMJOchQe9NcgLRbdzvsJSqwaRm/Yueg3EfGusYZ7gfpZO1Dzp2jBvtnhfx6wMAhF53
lT7JYqk83QTqPRh3/L1KHePe/4yZPKu/IiG7t+8ajCtvfUswDgAQukLV2hduwe9SqrqCS4kllbSx
5CwnSYGG5Gzu7buGHi361ldwbgEAoc80doKbHPjD9Tsdl1It3m53/Z4agnGulfS4PO0337wKPfdY
d2LHvvUHhOv3vohgHAAg9MoEJxj45z54PtbYrgf6jvtyQGTzOVpsXkfowq5963cbdIxzm7RRpQMA
QvfJ3E37ISfZwB8pCaWe9cJLjLzx5exgnPWhjm/3fs/BuBssg3HZ+5fVTKc8GFf+YUT8+gAAoWtI
Tsai0nb9B+Pe84zOFYn3lQTjZM1cZFW6ffU8vH/lmQG3+/QE4wAAoStgH2BzWW7Tatu792j1gnEa
yX55M5dqOsbZ7V9eZqAvWHVNa/ICAAh95okdq7cDwvW7HINxxSnp7vq9LwobeEWiOBjne7vSY9VZ
+S52nCAcfme+QDAOABC6b8TLbWYN/HPXjwXjmvOIk/SRuqj0c2yK0EUBtr1zlEyqosKJQrOOFwAQ
+uwyd+PQspi+JRfs9ISPODVxJbLOxgMeOsZlbfdLnoNx1xsE48onWlnNdAQd4y69In59AIDQm1DJ
aS63efge/oNx79YJxh1/b0Ewrt5H9XoW8s5C0DGu8JgJxgEAQldAa8lTrSVapegF49xWmNNLfwc7
y0pXSawXuxn7Ey65AwBCVxGcfTU52jHubstgnNl7dNe/0LJgnM7EyE+F7ja5yjvW2ELeWX8WbtxP
MA4AELp/wbktn1ooo7nrnokFkwTNKl0/GOd2BaB6oRvs5/p9mff2e06HWs0VGABA6LPJ3A1jy2L6
XC88ELeXbWLV2tl4MCMY52OJ1i97DsZdN3Rv332xlqxmOivJ/2kQWHxmdIwDAITelCq9GK2GK/6D
ce96Rly1Zk00jr/bcCnV4vfWex7d/Vj9dYwz2y4AAEIX4H4Zuvhea7+hzyvHwomGPNmf/d56HeM8
9hkYObdB484tACD0mSYWSCglXL/Hw1Kqk+/VXf+iUjBOZ6KxLKiEdSSnd6yx4zm9FKbc+BLBOABA
6L7pCSSUO/DPvf8Z6URBq5LTC8Y1bZU5D01vkklVZL3d+jISAIDQZ5e5Dw51jHO/jBoZCcVSeEFT
g3FfyQnGCdl4wHMw7v3ypjdBdjOdwzCl+5MMEb8+AEDoTZDcyCC9k7+Uqrm8s7zgPxj3TqVg3LuE
wbjiFexk59atAczBnxWfW/fPkWAcACB0BcwuQ+e3ELXqGGdR1DUvGFc0gdFbh9ydnKY3xoce+OsY
V9G5BQCEPtPEFvLOIly/dzLklGyi59x8ZD8Yt3F/jcE4+wmMzjrkwgrdQt7mDX5KOsYZEG48QDAO
ABC690HfYfGOUhkdf19BMK7ehUv6wglMVDhR8L8OuWDuknPVwGKikUyqIuerEUXvSZUOAAjdL3PX
n3dbFnN0sI7yJwud5qW/hROYja++UL6UatZ2H/QbjEsmVel5XfE90Tj+7pKlVM3OecSvDwAQuobk
XCutosYo8vS3/2DcO57pedjMpOTe6RiMM1kURTbZkvYE0OoGSDAOABC6An1BpVVUSet0opMTCyXZ
pvXClwUTtfxz29xugACA0GeYwHKt8Mm/Dde/8CL3jnH5793d+JJCME5+RaI8GGe9Rvru5f9qnr23
W7BFr2PcgwTjAAChVyc4QZV+/L0ZwTj7iUN1HePM9y3K+fc9C3mP3cdXqlpNw3oFx56zGhwd4wAA
oTeNuQ/sh5xkAbZyybk1I6m14UoeGw8ZBuPy5Z293a++MPJ5oMffk3Fv30N73+PvcuwYZ/KdAQBA
6MJKTob5fWU7AfgPxr39WZ1g3DsSeQY7K6byNt2u9yrd17mVLwBDMA4AELoCdgG2SSnnB+Ma9nz2
PrFQkvKlVKuT3LLjMZafW/dtaoYeAQChzzSxhbyzCDe+mBFyklf+3Y0HWhqMa47keo7HWDapigXb
3PvOfPWFBOMAAKGrDPoWl8Mz/iyrCYl9JVxNVzGdYJw8LKYvdId73jmrwfWstznelY9gHAAgdL/M
vf+ZvaVURUW6g+TqS0PLg3EPlwTjHLe/8ZDnYNy703v7Qx3j3MhqpnO4/K6JvLO78kX8+gAAoStV
coJFVbS6ivkPxr1NKRj39mcP5elxu96rdPsrJQbn1qmlLsE4AEDovknG5L7limhmImpuV7FY+O91
gnFBDcE453O703dc0Ef73AIAQp9pYosBPotw435Bx7h82XQ3Hqw5GJf9mfgJxlWTGZDeCtDrGPcQ
wTgAQOi+S/Se8D5r5sB//F0WwbhqG8z0BRVrSmQ9UagrM+Dh3OasBkfHOABA6E3j+HufcV9K9XDA
NpecXaWoV7W6VKz7f7fxtYJgnCB0t/Gw52Ccy2pwBlckjr/9WTrGAQBCbyjSZ5YXSytEt0rRfzDu
2md7NvI2fUb7+Nv2g3Eyqg3Gmcg4oGMcACD0NtF3qqI1uorpCy4lFlw1KJKR1jrkEpbtzulEat3P
uTXPIgAAIHSBTGLJylydNBj3ZU/BuNH36G58RSEYJ+9zLgvGVZkZKKqkzRaRsQ/GmXYZfJhgHAAg
dN+Iu4p1spuQxI5i067SpVcOIuMKvTGZAbsV4IbJWQ1OnBnoEIwDAITul+PvGer+5X5ftFhy1mui
XxJPMxqujO3fxiMZwTjbzEDGZ7LxNc/BuIPV4ATHmhmMe9uzhV0Ghd8ZAACEXovkiu7/loWnyruN
+Q/G/d2z0kVasq9I/P2zqeBWBFcjdKt092P1kxmY3D7BOABA6AqYXYa2v//bt5B3VYJLiQXSLZac
7DJ0NR3j/NwKkD7TzyV3AEDoDRRcuPFA1lKqOz3LPt/jdDe++sLQ+9HKH6nLa3m7LNyzahal8bNP
sdX3ZPLvwo2vEYwDAITetEE/c+A//vZnY6F4tR5xqi4YV/exBuJ9ym56U5ig79R5iwEAEPrscvzd
go5xh4O4THLVtoCVB+MeLVlK1e1Y08Bd5PXcvq3g3r65hLMyA3vfGdkthohfHwAg9Dollz2Im4en
7CTgPxj3VqVg3N9lyNNeePoTGF/NdIKOdElagnEAgNAV6DtUbiYi6ivdw5USCycaZi1vmyC59N6+
TjMdOsYBAEJvHOMd4+wH/3DjwaxgnLiK62483KJgXKehwTi3z6hMvLHDlY3R78wjBOMAAKE3Y9Af
HcSzmpDEQploSa5ZwTjdqlW6AE9n4x/pGAcACL0VHH+nZTAuexB3k1y5EJoZjPu6YzCuTJ6Peg7G
XTt2b9/tUbOszAAd4wAAobeqSjevwMqDcW7VnP9g3DVKwbi3OspTewIzfIvB7RzTMQ4AEHqL6Nv1
XZ9Y9COv4Upf2Cu+bcG4WHi8pxWOdVlpkkHHOABA6I0jKAo5Ga3YFW589YV+llId3a/uxj8qBONc
G+q0cS34sgrdfZ9iwSRh7zvzKME4AEDo/gVnLm/jgf/4tRbBuGrXC+8L9iclMq787d4j3Pi6d8lJ
16fPbnpTtgCP2ftQpQMAQvfJ8XekS6nuDISbiYyFYresahMbrnQ2vmEZjKupLWpm0xv7/ZqcrF3z
rGaYEgAQOlRWyZmGnGye+84e9P0H497yrLQJTLHkbCcIo38bqZ1b98vjdsE4eZgSABA6CNAKOWl1
opMSC/+9W1tU91S5Owcd49wx7wZoN2HjkjsAIHQ1wUlCTg/lBONkDUi6G19TCsa5ydE8GNeUR9fk
V1/ynmKIhZPAcOMbBOMAAKH7ruLkIafs1bliD3tXT8c4l2Cc/NE1DcmJ7+3nNL2hYxwAIPSmcfxt
z7otpWp2/7f5bVEdpLT5zRcseTvW0X3xG4w7aHrjWbzH3+KhY1xAMA4AELoGWvd/pW1R/Qfj3mwX
jNv7s52RV6bk3tzQ9Hcg7gkg7xhnt/wuACB0EKB1/7cvrIYrDcZlyzvzmfw2pb/NV4PL3k+V0GPA
JXcAQOgK+Lj/+/ALs5dSlT0m1t14RCcYZyHvLGH5SX9f2uSlfaguGGe+YEt+xziLY834rMPNb76A
YBwAIPRKBn27qi5r4ZLYw2TDu+QSofQt5J0lrCh3YmQu7+FL+JcmRgqS6zmutnbp7za+kdMxzu0Y
Vc8tACD0meb4tYb3f93aokrvtVZTtVpW1pv/VB6MMxabWfXvdm6vyekYZ3e8k5O1v937zljIm45x
AIDQa6nS7as6rfvK3oNxx/72uZ4g9V0ouURoAwt5m1f/sksS0uNdzKnE6RgHAAi9cQRj93/tB2qv
95UNtislFv578wmMjyVaZSwLvhfeMwMVnFsAQOgzTSyQUEq48bWcYJyM7sajDeoYVya5IENywsq/
smO1XfkuEE+Kws1/JhgHAAjd/6Cvs3CJ26Cv3/u777AfJpfG5ZL7pxfod4wzfrxs7/bB5jdfEIkn
RdVlJAAAoc8ux//esWPc6GAdOVWI5XJpZjDunz12jFOcwBx/87PpSnArpvLOCbhN7NOxNz2X/50x
z2BE/PoAAKFXITm7wbr8vrLbEqP+g3FvcgjGGSxeUig5888g0j63Dun0/HMreyyOYBwAIHQF+sKq
tfy+spvoqg/GmR231gTGu+QSYS87PlpWfm4lFTqX3AEAoSsQZHT/siPceCRnKVXZRKG78Q3FYJx7
UrtNExjpojRLxt8Zu+9NuPktgnEACB10B323SUHW6lyxxnPfHva1L1w6NjKu/O2k519y8ja8aTOd
qKrvDAAgdBBw/K2qK4ZJB/76Hucqkty3coJxDZvAHHvTc+l5XXGQbXFm4I3PNXOVOQBA6KC2Ypj0
ESf/wbg3PqeSSD/2f56TPTHQUZNctcvk0jEOABB6jQRq3b/6ApFoVegpcaWSM38v3Y5xbsn0pYq/
MwCA0EFNcOWEG18vCMZZLbk58nhVd/ObLwhVq1bNteDtJwoamYGeoLVv0T7Fwv0KN79NMA4AoYOe
4OyrytzuX8ffvB+MM5N33vrkesE493BcVCo5t4lCuPkvih3jHCvqzW8Jg3HVP5oIAAh9Njl+zbPy
7l8lwThDeWe9bzODcf+SIbmyatgMv8G4//PcYcc49wnQZGbgDTmZAbtJQ8SvDwChg4bkbAbjyb/N
XW7TQt5Z7+s/GPeG3WCc//T3G1oajCs/F+Yd4+w4zc8OAKGDbwK1AFtfUMF5r1iNJVdVMM5wYiRk
WXBe889BIFiiNafyBwCEDnJiR+EeEG58I2cpVdmz2d3Nf1IJxtmnv10mMP4mRrLJi04iXVr5dzf/
VeXcAgBCn+kKvSe895s58B9703Oxh32rPv3tLjnxxMh7+lu6Pn2y78k+RRPn9m9yzq3d43FU6QAI
HXxy/M2OS6mODtyRUiXXzGDctwuCcZ4nRhKOvcGiY5x9Ir0nWHVN69wCAEKfeZraVcx/MO5vHNui
ju73UsZ2NdeY93tu7a4iLBpX/3bbJRgHgNBBgb7toDz2OFr+fWW3JUW1L8uWV+nF+y0LxtmvMe9O
GmCTJdLNz22NVyMAAKHD3qAfW8g761nycPObL+gaVXF2FV1381vKwTj7yrJYcrKqtVm3GIpbwErv
z3c3v0MwDgChg9qgb9UIpnzhklggTT3J2Sbwg45pn/NYcDViry3qvyl2jHOcXG3+a0Yw7vUWoUfu
owMAQq+G43/77Eoi7oGFvLNkFRXK051qq1bDiUci3khju76PNzMzYL9Yi/nja/VfkQAAhE6VLqym
/a1ENrof/oNx/3tfcp6f0U62uxeMk203quWKhGtmgI5xAIDQG4Z84RK3lcjKhbKkdLzSx9cWK92u
DJ3MQGB4bvPPARU6AEIHBWJhJR1u/vMLukYVup1Qupv/UkEwruoJTP7nWH9mwPxY5R3jvkswDgCh
g2/E4amsgf/YG5+LhZdltap0ae912Xrh+Z9juPmdmoJxBfuY7FM0cW5fJ+wYp3duAQChzy7H3pTR
GMV+FbbIq1DqErppMO47hsG4mhemOfZ6g2Y65fu4ZFT98zw6ACD0BmB6ada+MYq0GvYfjHu9UjDu
9c+l2xwIr0hEqhOYRoUedwjGASB0UECrMUoTVyLTDLA1Lxg33DHODQ+hx53R194jklToAAgdFAZ9
6f3ucPNbBcE492q4u/ntGQjG6ba87Qn2x+FZ9Ex5Z71Hd/N7p0J+fAAIHeoe9CcFMHkZ+g37wTgd
eTZRcrHldsYnN+HmdysOxhlMtpJ9iibO7WvTc2ssbzrGAQBCr4Jjb7RYMSx/cI68ThbqrFodJTex
XbcrE36Dca8bCsb5b8NLxzgAQOitqNJ9LLcpX+HMfzBOSXLJdkeDcW5E3s+svA3vYuF26RgHAAi9
QQRqy2L2RVVroBiM05BcpwUd40o/m53xV3ua6QAAQgdBx7g9ws1v5yyl6r4KWUp389+mOBg3GSSr
+Nn7nYwgm/GxSpsSdTe/TzAOAKFDdYO++SCdtXBJbDnI2wiluuOdlK99MM4sBR5u/vspnWCcmbwz
z83m905FE+f2ry/EwvOqdW4BAKHPLsfeUBCMMx+ko5x/38SuYgbBuGL5JuKN8uXZMX+Ea/Iz9huM
e+1zg+T9V4Tn1qxjXKcR5xYAEDpVurCDmtZ9Zf/BuNeOB+Ms5Hsos6WM7a4k/35gIW/zNean7dwq
dQMEAIQO0mUxfTZcUaxYD99jp2ch7ywZlkvOb7tVybldFv77di2TCwAIfcaJBdLdW0r1Xw2XUrWr
WLub36kwGCe/DK21xrysQnc/r0X7JF8m9zGCcQAIHaob9AWSO/b6oWCc/+YmsuP1vEiL08RoknDz
+6eq6RhncfzJPkUT5/avLjR1mVyAxrD1f09Gyeu65PVI8npy60cnr0booMqxv9kPxtkvnzr8inL+
Tvrct/9BX75IS2b6uyNvLev9eI/99YXBxGpw9ucjElf/2ceN0GGa5b2TfMefSF73JK9r9r/vM/md
/32+HhWzJ7kwd/Atp+i+8pLjPqX4D8YlkkuEnE5iuoLPa2m8Ik+2u5JsdyD8HCMPlX7WOQgF50B2
bvOPm2ActFbe+9/9xd3/mk9OZ/I7j9CrJw05XV3rcptVVeiHMooE+7ZYKE/3z3FR7dz6Pwd9x0mL
ytUIgIbJu8rxrNFwyb36Cl26Qlq4+R3DYJxteOq7LQzGNesHHwvPQbj52KnyboBBgbzzOsb9B8E4
aJa8DS6bS+gm25y57zwVevW4r442KqMReRx73XNxImTpvqXbHXg/Xh3xpsd/RjCx2pXnsasvrDTo
WLPP7V9eiDd/cMqtQtc9twBVV9624+RMfeep0Cvm2OsNllJ1b4wiDaE1MxiXkf7uyB8T8368x/7q
worzAGIaeiTpDrNdeduMkzP3nadCr69KDx0rrU7HVzBu971Hmr74D8b91YVBImRZMC6rak3kmWx3
0BkPodl9nlGnjmBcUMG5nXw/gnEwLZW3KTP3nUfodRAYhqfsq62+hbyrrOLMg3HZP97Fguo/FOyX
fjAusD7etoUeAXk3Qd585ztccq+L2PmLftAx7rsFwTj3hUu6m99TCU8tG7x30eVk/+uFa/3gD0KP
ecdSfrzh5n+c8hp63P8/dbceJxgHZvJu0GVzyba7Wz85OVPfeSr0eugZfdHLZTQyMTj22ufizX8/
5WO7A5Xj9f+oXnr8Zxx+6Ify/MGp7rG/9ByMk1cbk+f2Ly7Em4+fkn5tCMbBNFTeNtueqe88FXoN
HHvdWDDObZYalQrFbbunFX5s4vT35mOnIueJUYXBmd3JwXDHODfKz63N6dVehAeovKutvG06bc7U
d54KvS607v8GgvCUguB2JXf1hUEiZP/BuKsvrCTbHXRcA4bBJXnGClckQsv9KD23gUkwLqh4sgZU
3vVU3qZ/O1PfeYReH+7BuN174rk/0PR+9TWCH1CkdLz1BuMCy4mR1rk1G6DMMgNuz6ID8p5Wec/8
d55L7vVhVhWOh9uCS+G2cPPfT9kvpTr+o8i4dLX5/VMaP4LqgnF2l/v0OsbZhdaGX+HW4wYd4+zp
bv2QYFzb5T2Dl80l+9Hd+unJmfnOU6HXRy+n8raZoWYtXNLb/N4p6cx6qSNvUpN9vIHjjz+/BWx6
/GcE+7WbKj/2Fx6DcQXNdALzbUyc26OvvhAnQpZWPwTjqLynofK2Hc9m4juP0Gvi2GufW0kq7PRL
Fgq+5FEnu3943OmILp0vKvyoe9IfdCLeKE17O1+RKP7Bx97O7WsurGw+fmoQmN5HDyzOrf199MH+
v0mvkPSO/vmFmF8f8p5ieWdtO/0sH0PoUEWVHgq+5EVdxSRC9x+M+8sLg80fjAXj3O4Bj16RSCrr
RPRmE6P8H33U8RyMC/KCcYGXc7tUIu/+/vGkAl/hZ4a8Z0jeWcxMMA6h10kdHePMfhSR0vFKJxqL
xhMjux+9TjAuUDi3waVzi7yRN/KuqUBB6JBFOhDLVgz7/qnu/qIg44IT/Sg2Hzu1dOzqC77voy8r
XTmwb7c6+nHodIwTntut/zzVzZD0o+kLeTdS3ktj8o7U3gx529DdeuJkePSq5wcIHTTpefhyZy1c
0ktEL/1R6AXj3H/g4mBczsexmyo/+hrFjnHmOzVceU+AyJE38nba7kwE4xB6jRz76wsrm99TWjGs
icE4DxOYzcdPRcdeYxaMsxwjvAbjjr76wsrWD0vP7bC80/fmsjnyRt46252JYBxCb0KVHqisGOZ+
v7roMTHJBOYvLgw2/yOjY5z9I3WjVySSyjqpsCflaffjjzoaHeMOzy3yRt7Iu77tzkQwDqHXTd3B
uOJH4jSO13qiEZhPYELBj1/jUb37kv+9D3kjb+Rd5XZ3bMZJhA5eSau1M4Ivd7j52Klu2tc8Q3Ci
H0ZSTS8lVXWlwTiDXSwOxvlf0c0ZnvlG3si7Fnln0d1aDsOjpwcDhA6a9Dx8ubMWLult/uCU9Iek
GowL3H7wah3jtn54qpve++YribyRd6vlXTSeIXTQI33kbPP7pwbC++hRJ/v+b5xZDcubm0h+w+Jg
XCLeKG2DajwxMn8vr8E4QN7IuzZ55wl9qoNxCL0JaC2luhfKkgxw/i9Dv+bCYOtx/8G43FS52QAw
ULgSAcgbedcn76y/n/pgHEJvBnbBuMBQvIFDx7jJyl9rAiPZtnkwLl/epM2RN/KeXnlXUqAgdMgi
FcsZwZc6b8WwnvRHuvn4qaVjr2lJx7jJJwaQN/JG3rMp7yy6Wz8Lw6OvnN5gHEJvBj0PA0LWwiW9
RPQ+ZrXN6hiXP9N+dGiChLyRN/KubNuNk3fR2IHQQY/0kbPNHxTc/zX7QkedsmBc4PQ7qrdjXPZ+
D7b+89RSIuyR7ST/3+lneCvfKOSNvJF3znanOhiH0JtVpYeCL/RizhfZ6H51YF8NO3P01RcGWz/M
CMblyLvDZfO2Sfwe5I28GyDvLKY6GIfQm0KFHeMCu/2KlI43a6KBvFvO0T97vrf1o5ORx+8J8kbe
Prc71cE4hN4cUoGdEXyhw83HT3WPja0Y5um576WkqvZ9H/3ryWsFeU8lPaeBE3kjb/3tdrd+HoZH
XzGdwTiE3qxBUPqFnnw++zUXeluPn5L+OLwH4xJ5P9o5DLHBrAkdeSPvmj67IJjeYNzvMfY0g/1H
zgYjX+jxV8F3f/8V5fxBbLqtHBY5Q9NFeq87eV2TvCKFzffzvqCO3z+jL3/ptpuyH6rb3sl4edpn
rWNU/OwSeU+8OlN82Z0KvUkYdIwL3MQrbeSyxMlpt7w7+Wnzezu+290GSl33qLypvEvkncGgM5bL
OfI/B1N7aw+hN4uRYFxg/yORLaWav92IUzMV8q5ksnb0T56Pt358Enkjb+SN0Ge5QDcIxhX/SPJW
DLO9Pz/xw+DsTIW8s9CarJkH45A38kbeCH0K6Xn4kaQD9EjjhDShnojeWN6kzadW3tnb/tHJpaN/
+rxGN8Al5I28kTdCn0mOvsZixbB8sjsh7a0XHiLv2ZZ3zuCs0d63j7yRN/JG6LNO+iW+RvDjy+yE
lIj7Kj5a5J3DosJ79ZA38kbeCH3W6Tv8+IZ/GI/xETZW3mFn75ZI03qb+w/G/a/n462fnETeyBt5
I/SZpmch790fBpfNW8JeQ4tHGjg4R0rH69YxDnkjb0Do08DRV1+It/7zFPKelclaQwbnrR+fXDr6
JxUF45A38gaEPkNchbyncLL2p88Ptn50MnuVufrFphOMQ97IGxD6TA/8f34h5lOYWtJze3UDxaa/
7j3yRt6A0AG0yUmb/1VSVfsOGaaPc12tdiDuj4r5D8a96vl464mT9R4j8kbeCB1g5uSdRfYz/bLB
Oh0wz3jals+/jZQ+blkwDnkjb0DoAJbyzuK0kuDqlnf2Z/XTk0tJVV1fMM56v3d0Pg/kDQgdoNXy
zqvQvXL0T55f2frxyXSgDeuUd8HxVhOMQ97IGxA6gJK8s+gm8g0TCQ8UqtawZnlnbbvajnHIG3kD
QgfkrSy28ap14Pk9RpbJrUne1VyRiJ6Pt+KTyBt5A0KHGZR3uC+W051mtEf1H4wbXya3Oe1RtT7r
/fvoyBt5A0KHWRD5NckAek/HtOlKdYOzTjAuaNQxHp6HJ04uHb3KdzBuJz8Y1xR5K24beQNCh9li
b9DretyWLzQWLlnZ+mlJMK6+qlQvGIe8kTcgdJgJ9B/ncpxkbP3kZJhIeKBwvGFDjnGYeoJxyBsq
ZPu/w3B/8pq+5o+8fHA9QgfwVbX+6fO9rR+dbOqg7z8Y18kJxtUvNv9XJE4P4q3lEHlDE+R9ev+/
w1cD03OG0GG2GAqsLR39s+dvVXiLdFCMGjXoB5ck19yOcV7EdimwFintj7+lVJE3uMs7i+72LxbC
Iy+/OGj78SN0KJX37g8jGP1hJNV0nFTVscKgH3nepo+/bU7HOL/yzj73cbh0NBo0YylV5A1+5Z33
G9C4CofQoXnyzmFpf7DzSV/0r/WatPi/DH3V8ytbTzgE46rpsFbPUqr1yHtl/3uMvGdH3nm/8cfa
/jkhdORt8cMYQX+5zXrknUV366cnw6OvqjgYV1971OrObfXyTvdjef+/qbwHjAozJ++s797pafjs
EDrydkWjz3lv68cn65Z30fH6HvwPg3HN6m3u/9y+chBv/TxUOTHIG3l7+B0sTcPnidCRt8MIqvgD
CIaCcfXJO68FbE3BuMo7rEVKn6o4GIe8kbfSuNDd/uVCeORl7Q7GIXTk7fzjSKrpKKmqY4VBP6pZ
3llUFIxrRnvUrZ+FS0lVXWswDnkj74on9a0PxiH0dgg82pdcbfIu+AH4Fnq/oc8payxcsrIVh+kA
EtYp74LjrSwYh7yRd8Xyztpu64NxCL0NBJ23JP97TUPENkxDltusZADobj1xMjx6lXIwrjkLk6QD
86MaVySQN/JugLyzaH0wDqG3g+oe56q7an3V872tn55s0jGOz+AHnrddvpRqtceod25fMYi3/ytE
3si7CfLO+vvWB+MQejvo1f7jyBn0t35yspsuNuL5neJOUTCr3hawukup1n+MakLf5yTyRt4NkHcW
3e1fL4RH/qi9wTiE3gLSjmyZfc6bca9Z4z76YTCuUffTd9qzlKqH78jWz8Morap97g4yR94NkXfR
eIbQoYIqPVB7VEzyt5F3oQfCWwx+5F1J1Xr09GBl62cGwbh6BlCNyRog76bIO+8739pgHEJvC74W
t/DfpKXejnF68s6iu7UchomEBwrHG+oeo+FmA/VzC8i7KfLOotXBOITeHvqqP5AmPc511fO9rSdO
Ku23eOCqbinVauV9wHBgjeq8yfJOv4sB8va8L60OxiH09tBrgLyzCLd+erJ79FUVBOOql3ee0PWC
cfQ2B+Rdlbyz6G7/ZiE88tJ2BuMQektIO7Jd6nMeNO7H4f9eq/VSqkoD1+Tfnlb4HL02cEHeyBt5
d6RXHRE6KFN2H72+H0fUqXQp1crknfdj9ztZe8VgZevnbsE45I28kXdHowVsK4NxCL1dHAq9WT8O
xWBcrfLOorv1szA8+srqg3HIG3kj70qOsbXBOITergq939Afh0af895WHNYt76Lj9S3NkWAc8kbe
yLu2Y2xtMA6ht69Cb96PIw3GxSe76WIjnrcbd9LL+c3MDHi9JBdMLqWKvJF386Q5ffLOorv95EJ4
5CXtC8Yh9BZx9FXPx8Z9zqv/4VUfjKtvcNFaSvWTyBt5I+9GbLeVwTiE3s4qfamBP7yooxmMa9bg
4v2SXCLwtCK/la838kbejdjnVgbjEHobhd6EFrCTA1ezl1L1Owh0t34ehkdfQRWNvJH3FMg7i1YG
4xB62/AVjPPfpMV/MO6Vg97Wz8LGDQL7gbVWL+KAvJE38q52PEPokF2h1y/vrG2HW8thN11sxPPx
xh2TBjPVd1jr8lVE3sh7KuSd9bfd7f7C0pHFi702fecRess4etXzcWGf83rbo+oupVqfvEmbI2/k
Pb3yLhrPEDpUUqUvNaS3+fDfRp02LKWKvJE38kbe5bRutUGE3kaMllKtpcNa85ZSRd7IG3kjbzda
dx8dobeTfgPkXckP4OgrBr2tn4fIG3kjb+Rd9X5ECB0qYKfX0B9IuPWzsHv0ldUE45D3FEj8F+E1
yX/egryRdxP3Y/u3C0tH/rA9wTiE3kKORoN4os95c34gKsG4RN4R8p5KThdXQsgbedd6jK0KxiH0
tmJ0H72WH0jkW+jBXjAOeU8nfeQ9o/JuxzG2KhiH0NtLU1vAev8BJOJ+NPnPo5zyacTg9hHyRt71
HWOrgnG/x4DS4som2P8yB56/1EHGy5wlTs30sf3fYXf7F2HkfbL28ouxx++ev+91U/ajCdt12fb0
HGPUpt8pFXpb8dXn3P/sNtz+r7C7v9gItFTe+5VJOpgt7k/SQrVvjOntIypvKm/9bU/c2kPooM7R
Vw5i6z7nij+QscS5RjAOqpf35N8nVfqRlw98n1uZ0JE38vYk7yN/0L410BH69JA/EFYn79bPapG3
CTuakzXzboDIG3kjb4Q+1UKvVt6d/QG9tz8Qx6TNp1beWVTXDRB5I2/kjdBnBs99zg3knT4qRvU9
O/LOGkC9hx6PvOxivP2rBeSNvJE3Qp/5Ch15I28teWexpPhdXkLeyBt5N/O0QAVs/TwsHaWRN/L2
PApclVbVXo/1VwuPJPtxDfJG3rlj1B9cZIyiQp+JKn0JeU/ldPtJM4FX3mHNfzDO5+0j5I28ETq0
csyffIYXeU/XZC2sWd5Z223OMrnIG3kDQp8ivt7Zv6eEvKeNnXTAu7qBg7P/YNwfXYy3f72AvJE3
IPTZJZF4zKcwtaTn9kwDBaQfjJuFMBzyBoQOUD8ZgbXBkZcPrvcuuIYuTJJU01FaVXven3pbwCJv
5I3QAWZO3llp8/SRGq9CP/Lyiyvbv1wYdIyT7TryLqjSY88fdb+yY0TeyBuhAyDvHLrbv1gIEwkP
vFfptkKvpsNavcE45I28AaEDjMh7r3Wurbw7OWnz9N/7FnpxMK6+9qj+g3EvvRhv/2YBeSNvQOgA
lco7T3KPed71dNA9U7O8s7a9pLTtnsq2kTfyRugAyNtiAD2tJLhGSiKppqO0qvb8jnKhI2/kjdAB
kLdwcPZ/GfqPL65s/2ooGNesx7nqD8Yhb+QNCB1aLvJfhN/vNKe3+QHd7V8uhEde5jkYFzgE46oR
ULXBOOSNvAGhw1Rydc3yLqpaB563ad4xrloB+b8i8ZKL8faTC8gbeQNChxkiHfCiRvQ2H922bjCu
WQKqJxiHvAEQOkwTO709oTducD6t8I69hh3jJZJqOkqraoXj1W0Bi7wBoQOYMxRY6xx5ufee8/0m
iW3obzUWLlnZ/o2njnH+5dbspVSRNyB0AGd5p1XzeNr8MYVB3//g6Oc57+72rxfCRMIDhao1rPQY
zWjOUqrIGxA6gFd551VxfqvWl13sbf9yoW55Fx2vb6HbB+OquVzt/9wuXoy3+wv1HR/yBoQOyHuc
S4G1cPsXC910sRHPu5cOrFHN8s4TerXBuPruNesvpYq8ARA61CLvooE/Vhj0o5rlnYVuMK5hvc2T
ajpKq2rP+1JHC1jkDQgdkLfBABp1/N9HX07+97qa5V3J41xHXnpxZftJj8E4v895N28pVeQNCB2Q
txd5Z1FdeKr+1Ht3+zcLYSLhgcLxhkr7LPn86g/GIW8AhI68VeSdV8X5rVr/+OJg+1cLK8m+dGuU
d9Hx+hZ6eTCung5r/s/tH16Mt3+7gLwBEPoME+wOrk/ULO8swu1fLnSPvMxzMC7oFDeYqfYYxyWn
G4xrTntUvWBc8baRNwBCn17SBi7bvwjrlnfRwB973uqytdCruSSvE4wLaj7GnO0m1XSUVtUKk7Ul
5A2A0GeYHbOEcPX3mqOOTtK9Scc4XKH7nay95OLKdi8jGNeM1LvGZO2Tyes+5A1gxu/xEUwlvczB
efzlc+A327ZueKoZx3hAd/vJhVBBsj2VY5R/dt7PbSLyATIHQOizTr8RYpv8W40+54Nkuys1y7uy
Kr3j63Eu/9+PJX52AAgdNKq4+uWdRbj96wWNRHqvkmO0366G5GLxMepMfBA6AEIH71XryxzCSf7l
XWUlt9wAeWdtt7qlVOs7xoNJxr3bv1O4xQAAxhCKm17yg3H1tkeNOlUE45oTFPM7WVu8uLLd3w/G
1XOMcYe0OQBChxqE3oze5sPbXlTYdk95n13/vpvIN0wkPFA43rCCY0TeAAgdaicoCU9N1+Ncg+0n
F9KGNV2lfZb8fTOWUkXeAAgdWl2h1y3vLMLt3yx008VGFI43qlneeUKvdilV5A2A0GF6OPLHF+Pt
Xy/ULe8iycWeDzm7Y1z97VH1g3FB6d/29j8f5A2A0KHFVbrbJW7dqj7qaATjgkYd4/Dkxe9k7Q8v
rmz/LnMp1d5Y5R3zEwBA6DBLQq/+knz1wbj6bjt0t3+7ECYSHnh+1/Qyfhd5AwBCnw36DRLbMP6r
1qWLg+3efjCuWZmBg+P1KvRE4Nfz9QaAYWgsM81o9f3udKQNTMLtJxU6xgUOl931jlF1AgMAgNBn
iCMv9XQZtj29v5cbdowH2zrNtxEAEDpIsUs1V9c6NKrtWKtvj0qFDgDqcA99NoQubwFrK7ZydJdS
rfcYSZsDAEIH7/RVxWa7bc3HuRYvDrZ/K+wYh7wBAKFDI/Hd59xfb/hwu7/QTRcbUajSI+QNAAgd
poojL7kYbz/p2DFOf2GX6jrGIW8AQOgwBZQ3mKlnVbaoU8VSqsgbABA6TKXQm7Okqk4wDnkDAEKH
qaRsKdVq5J2FRp/zwfbvLgXjkDcAIHSYugq9bnlnEW7/dqGbLjbieQ+uYlUxAJi92g1mgu3+wk5D
v0FXJUKncgYAEEKnuFmu0l3k7b/DWsSpAQBA6KAl9Oraoy5yagAA5HAPfXboF0pWA7O0+TKnBgAA
oYNthU5vcwCAqYRQ3Ayx/VtPwTjkDQBAhQ61V+l2z34jbwAAhA4tEzryBgBA6NAK+sgbAAChQ9vJ
XkoVeQMATMUQDzPF9u8WHkHeAAAAAAAAAAAAAAAAAACZ/H8BBgAgCZajFsmZRwAAAABJRU5ErkJg
gg==
"
       id="image56" />
  </g>
</svg>
 + mediatype: image/svg+xml + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - logging + - loki + links: + - name: Documentation + url: https://loki-operator.dev/ + maintainers: + - email: loki-operator-team@googlegroups.com + name: Grafana Loki SIG Operator + maturity: alpha + minKubeVersion: 1.21.1 + provider: + name: Grafana Loki SIG Operator + version: 0.0.0 diff --git a/operator/config/manifests/community/kustomization.yaml b/operator/config/manifests/community/kustomization.yaml new file mode 100644 index 000000000000..69459e68bc3e --- /dev/null +++ b/operator/config/manifests/community/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../../overlays/community +- ../../samples +- ../../scorecard diff --git a/operator/config/manifests/kustomization.yaml b/operator/config/manifests/kustomization.yaml deleted file mode 100644 index 5d9120c449ad..000000000000 --- a/operator/config/manifests/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -resources: -- ../overlays/openshift -- ../samples -- ../scorecard diff --git a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml similarity index 99% rename from operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml rename to operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml index ea00721b6742..6eca8e2a72af 100644 --- a/operator/config/manifests/bases/loki-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml @@ -1474,10 +1474,10 @@ spec: - name: Loki Operator url: https://github.com/grafana/loki maintainers: - - email: loki-operator-team@googlegroups.com - name: Grafana Loki SIG Operator + - email: team-logging@redhat.com + name: Red Hat, AOS Logging maturity: alpha minKubeVersion: 1.21.1 provider: - name: Grafana.com + name: Red Hat version: 0.0.0 diff --git a/operator/config/manifests/openshift/kustomization.yaml b/operator/config/manifests/openshift/kustomization.yaml new file mode 100644 index 000000000000..2c11a266f9e3 --- /dev/null +++ b/operator/config/manifests/openshift/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../../overlays/openshift +- ../../samples +- ../../scorecard diff --git a/operator/config/overlays/production/controller_manager_config.yaml b/operator/config/overlays/community/controller_manager_config.yaml similarity index 65% rename from operator/config/overlays/production/controller_manager_config.yaml rename to operator/config/overlays/community/controller_manager_config.yaml index b0471ff9fd9f..ea36278035ed 100644 --- a/operator/config/overlays/production/controller_manager_config.yaml +++ b/operator/config/overlays/community/controller_manager_config.yaml @@ -3,7 +3,7 @@ kind: ProjectConfig health: healthProbeBindAddress: :8081 metrics: - bindAddress: :8080 + bindAddress: 127.0.0.1:8080 webhook: port: 9443 leaderElection: @@ -12,3 +12,9 @@ leaderElection: featureGates: lokiStackGateway: true runtimeSeccompProfile: false + # + # Webhook feature gates + # + lokiStackWebhook: true + alertingRuleWebhook: true + recordingRuleWebhook: true diff --git a/operator/config/overlays/production/kustomization.yaml b/operator/config/overlays/community/kustomization.yaml similarity index 83% rename from 
operator/config/overlays/production/kustomization.yaml rename to operator/config/overlays/community/kustomization.yaml index 1cb2fc24f173..213f650e0948 100644 --- a/operator/config/overlays/production/kustomization.yaml +++ b/operator/config/overlays/community/kustomization.yaml @@ -4,7 +4,6 @@ resources: - ../../manager - ../../webhook - ../../certmanager -- ../../prometheus # Adds namespace to all resources. namespace: loki-operator @@ -23,8 +22,8 @@ labels: app.kubernetes.io/managed-by: operator-lifecycle-manager includeSelectors: true - pairs: - app.kubernetes.io/instance: loki-operator-v0.0.1 - app.kubernetes.io/version: "0.0.1" + app.kubernetes.io/instance: loki-operator-v0.1.0 + app.kubernetes.io/version: "0.1.0" generatorOptions: disableNameSuffixHash: true @@ -39,15 +38,12 @@ patchesStrategicMerge: - manager_related_image_patch.yaml - manager_run_flags_patch.yaml - manager_webhook_patch.yaml -- prometheus_service_monitor_patch.yaml - webhookcainjection_patch.yaml images: - name: controller - # Change this to docker.io/grafana/loki-operator once the following issue is resolved: - # https://github.com/grafana/loki/issues/5617 - newName: quay.io/openshift-logging/loki-operator - newTag: v0.0.1 + newName: docker.io/grafana/loki-operator + newTag: main-39f2856 # the following config is for teaching kustomize how to do var substitution vars: diff --git a/operator/config/overlays/community/manager_auth_proxy_patch.yaml b/operator/config/overlays/community/manager_auth_proxy_patch.yaml new file mode 100644 index 000000000000..8eb088101cde --- /dev/null +++ b/operator/config/overlays/community/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: quay.io/openshift/origin-kube-rbac-proxy:latest + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + name: https + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true diff --git a/operator/config/overlays/production/manager_related_image_patch.yaml b/operator/config/overlays/community/manager_related_image_patch.yaml similarity index 100% rename from operator/config/overlays/production/manager_related_image_patch.yaml rename to operator/config/overlays/community/manager_related_image_patch.yaml diff --git a/operator/config/overlays/production/manager_run_flags_patch.yaml b/operator/config/overlays/community/manager_run_flags_patch.yaml similarity index 100% rename from operator/config/overlays/production/manager_run_flags_patch.yaml rename to operator/config/overlays/community/manager_run_flags_patch.yaml diff --git a/operator/config/overlays/production/manager_webhook_patch.yaml b/operator/config/overlays/community/manager_webhook_patch.yaml similarity index 87% rename from operator/config/overlays/production/manager_webhook_patch.yaml rename to operator/config/overlays/community/manager_webhook_patch.yaml index e43b1f0e94b6..a276796f734a 100644 --- a/operator/config/overlays/production/manager_webhook_patch.yaml +++ b/operator/config/overlays/community/manager_webhook_patch.yaml @@ -19,4 +19,4 @@ spec: - name: webhook-cert secret: defaultMode: 420 - secretName: loki-operator-webhook-server-cert + secretName: loki-operator-controller-manager-service-cert diff --git 
a/operator/config/overlays/community/prometheus_service_monitor_patch.yaml b/operator/config/overlays/community/prometheus_service_monitor_patch.yaml new file mode 100644 index 000000000000..47fdd4077486 --- /dev/null +++ b/operator/config/overlays/community/prometheus_service_monitor_patch.yaml @@ -0,0 +1,14 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + name: loki-operator + name: metrics-monitor +spec: + endpoints: + - path: /metrics + targetPort: 8443 + scheme: http + interval: 30s + scrapeTimeout: 10s diff --git a/operator/config/overlays/production/webhookcainjection_patch.yaml b/operator/config/overlays/community/webhookcainjection_patch.yaml similarity index 100% rename from operator/config/overlays/production/webhookcainjection_patch.yaml rename to operator/config/overlays/community/webhookcainjection_patch.yaml diff --git a/operator/config/overlays/production/manager_auth_proxy_patch.yaml b/operator/config/overlays/production/manager_auth_proxy_patch.yaml deleted file mode 100644 index dd7a508e0c4f..000000000000 --- a/operator/config/overlays/production/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt" - - "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key" - - "--v=0" - ports: - - containerPort: 8443 - name: https - volumeMounts: - - mountPath: /var/run/secrets/serving-cert - name: loki-operator-metrics-cert - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - volumes: - - name: loki-operator-metrics-cert - secret: - defaultMode: 420 - optional: true - secretName: loki-operator-metrics - securityContext: - runAsNonRoot: true diff --git a/operator/config/overlays/production/prometheus_service_monitor_patch.yaml b/operator/config/overlays/production/prometheus_service_monitor_patch.yaml deleted file mode 100644 index ca346871f88c..000000000000 --- a/operator/config/overlays/production/prometheus_service_monitor_patch.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - name: loki-operator - name: metrics-monitor -spec: - endpoints: - - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - path: /metrics - targetPort: 8443 - scheme: https - interval: 30s - scrapeTimeout: 10s - tlsConfig: - caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt - serverName: loki-operator-controller-manager-metrics-service.loki-operator.svc diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go index c3cdf52e3993..f7d066221cea 100644 --- a/operator/controllers/loki/lokistack_controller.go +++ b/operator/controllers/loki/lokistack_controller.go @@ -3,6 +3,7 @@ package controllers import ( "context" "errors" + "time" "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" @@ -158,7 +159,7 @@ func (r *LokiStackReconciler) 
Reconcile(ctx context.Context, req ctrl.Request) ( return res, derr } - err = status.Refresh(ctx, r.Client, req) + err = status.Refresh(ctx, r.Client, req, time.Now()) if err != nil { return ctrl.Result{}, err } diff --git a/operator/docs/operator/hack_loki_operator.md b/operator/docs/operator/hack_loki_operator.md index 58e22b6328c3..cce551071f87 100644 --- a/operator/docs/operator/hack_loki_operator.md +++ b/operator/docs/operator/hack_loki_operator.md @@ -30,10 +30,10 @@ Loki Operator is the Kubernetes Operator for [Loki](https://grafana.com/docs/lok * Build and push the container image and then deploy the operator with: ```console - make oci-build oci-push deploy REGISTRY_ORG=$YOUR_QUAY_ORG VERSION=latest + make oci-build oci-push deploy REGISTRY_BASE=$YOUR_REPO_ORG VERSION=latest ``` - where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images. + where `$YOUR_REPO_ORG` is your personal registry location, for example a [quay.io](http://quay.io/) account where you can push container images. The above command will deploy the operator to your active Kubernetes cluster defined by your local [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). The operator will be running in the `default` namespace. @@ -108,10 +108,10 @@ It will undeploy controller from the configured Kubernetes cluster in [~/.kube/c * Build and push the container image [2] and then deploy the operator with: ```console - make olm-deploy REGISTRY_ORG=$YOUR_QUAY_ORG VERSION=$VERSION + make olm-deploy REGISTRY_BASE=$YOUR_REPO_ORG VERSION=$VERSION ``` - where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images and `$VERSION` can be any random version number such as `v0.0.1`. + where `$YOUR_REPO_ORG` is your personal registry location, for example a [quay.io](http://quay.io/) account where you can push container images and `$VERSION` can be any random version number such as `v0.0.1`. The above command will deploy the operator to your active Openshift cluster defined by your local [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). The operator will be running in the `openshift-operators-redhat` namespace. diff --git a/operator/docs/operator/storage_size_calculator.md b/operator/docs/operator/storage_size_calculator.md index 883490dd0142..19bcfe2d474b 100644 --- a/operator/docs/operator/storage_size_calculator.md +++ b/operator/docs/operator/storage_size_calculator.md @@ -26,7 +26,7 @@ Storage Size Calculator is used to have an idea on how to properly size a Loki c * Deploy the storage size calculator by executing following command in the terminal: ```console - make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG + make deploy-size-calculator REGISTRY_ORG_OPENSHIFT=$YOUR_QUAY_ORG ``` where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you can push container images. @@ -88,7 +88,7 @@ If you want to contribute to the storage size calculator, you can follow this lo After replacing the image name, deploy the storage size calculator to test your changes: ```console - make deploy-size-calculator REGISTRY_ORG=$YOUR_QUAY_ORG + make deploy-size-calculator REGISTRY_ORG_OPENSHIFT=$YOUR_QUAY_ORG ``` where `$YOUR_QUAY_ORG` is your personal [quay.io](http://quay.io/) account where you pushed your container image. 
diff --git a/operator/docs/prologue/quickstart.md b/operator/docs/prologue/quickstart.md index cfeff43c387e..850263ed6532 100644 --- a/operator/docs/prologue/quickstart.md +++ b/operator/docs/prologue/quickstart.md @@ -29,7 +29,7 @@ make quickstart If you want to test local changes from your repository fork, you need to provide an image registry organization that you own that has an image repository name `loki-operator`, e.g. `quay.io/my-company-org/loki-operator`. The command to use your custom images is: ```shell -make quickstart REGISTRY_ORG=my-company-org +make quickstart REGISTRY_BASE=quay.io/my-company-org ``` ## Further reading diff --git a/operator/go.mod b/operator/go.mod index 329d64bcea88..429aea47e62b 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -146,7 +146,7 @@ require ( go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.21.0 // indirect go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230209150437-ee73d164e760 // indirect golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect @@ -154,7 +154,7 @@ require ( golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.3.8 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect golang.org/x/tools v0.1.12 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect diff --git a/operator/go.sum b/operator/go.sum index 9f831634cdf7..c6678595504a 100644 --- a/operator/go.sum +++ b/operator/go.sum @@ -1603,8 +1603,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/operator/hack/.operatorhub-pr-template.md b/operator/hack/.operatorhub-pr-template.md new file mode 100644 index 000000000000..3070374af52a --- /dev/null +++ b/operator/hack/.operatorhub-pr-template.md @@ -0,0 +1,51 @@ +Thanks submitting your Operator. Please check below list before you create your Pull Request. + +### New Submissions + +* [ ] Are you familiar with our [contribution guidelines](https://github.com/operator-framework/community-operators/blob/master/docs/contributing-via-pr.md)? +* [ ] Have you [packaged and deployed](https://github.com/operator-framework/community-operators/blob/master/docs/testing-operators.md) your Operator for Operator Framework? 
+* [ ] Have you tested your Operator with all Custom Resource Definitions? +* [ ] Have you tested your Operator in all supported [installation modes](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#operator-metadata)? +* [ ] Have you considered whether you want use [semantic versioning order](https://github.com/operator-framework/community-operators/blob/master/docs/operator-ci-yaml.md#semver-mode)? +* [ ] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing-prerequisites.md#sign-your-work)? +* [ ] Is operator [icon](https://github.com/operator-framework/community-operators/blob/master/docs/packaging-operator.md#operator-icon) set? + +### Updates to existing Operators + +* [ ] Did you create a `ci.yaml` file according to the [update instructions](https://github.com/operator-framework/community-operators/blob/master/docs/operator-ci-yaml.md)? +* [ ] Is your new CSV pointing to the previous version with the `replaces` property if you chose `replaces-mode` via the `updateGraph` property in `ci.yaml`? +* [ ] Is your new CSV referenced in the [appropriate channel](https://github.com/operator-framework/community-operators/blob/master/docs/packaging-operator.md#channels) defined in the `package.yaml` or `annotations.yaml` ? +* [ ] Have you tested an update to your Operator when deployed via OLM? +* [ ] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing-prerequisites.md#sign-your-work)? + +### Your submission should not + +* [ ] Modify more than one operator +* [ ] Modify an Operator you don't own +* [ ] Rename an operator - please remove and add with a different name instead +* [ ] Submit operators to both `upstream-community-operators` and `community-operators` at once +* [ ] Modify any files outside the above mentioned folders +* [ ] Contain more than one commit. **Please squash your commits.** + +### Operator Description must contain (in order) + +1. [ ] Description about the managed Application and where to find more information +2. [ ] Features and capabilities of your Operator and how to use it +3. [ ] Any manual steps about potential pre-requisites for using your Operator + +### Operator Metadata should contain + +* [ ] Human readable name and 1-liner description about your Operator +* [ ] Valid [category name](https://github.com/operator-framework/community-operators/blob/master/docs/packaging-operator.md#categories)1 +* [ ] One of the pre-defined [capability levels](https://github.com/operator-framework/operator-courier/blob/4d1a25d2c8d52f7de6297ec18d8afd6521236aa2/operatorcourier/validate.py#L556)2 +* [ ] Links to the maintainer, source code and documentation +* [ ] Example templates for all Custom Resource Definitions intended to be used +* [ ] A quadratic logo + +Remember that you can preview your CSV [here](https://operatorhub.io/preview). 
+ +-- + +1 If you feel your Operator does not fit any of the pre-defined categories, file an issue against this repo and explain your need + +2 For more information see [here](https://sdk.operatorframework.io/docs/overview/#operator-capability-level) diff --git a/operator/hack/addons_dev.yaml b/operator/hack/addons_dev.yaml index 11f92bf7d6f6..b39d8faf527e 100644 --- a/operator/hack/addons_dev.yaml +++ b/operator/hack/addons_dev.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: logcli - image: docker.io/grafana/logcli:2.6.1-amd64 + image: docker.io/grafana/logcli:2.7.3-amd64 imagePullPolicy: IfNotPresent command: - /bin/sh @@ -73,7 +73,7 @@ spec: spec: containers: - name: promtail - image: docker.io/grafana/promtail:2.6.1 + image: docker.io/grafana/promtail:2.7.3 args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info diff --git a/operator/hack/addons_ocp.yaml b/operator/hack/addons_ocp.yaml index 8f000d3c19ab..c2d2fa22aa83 100644 --- a/operator/hack/addons_ocp.yaml +++ b/operator/hack/addons_ocp.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: logcli - image: docker.io/grafana/logcli:2.6.1-amd64 + image: docker.io/grafana/logcli:2.7.3-amd64 imagePullPolicy: IfNotPresent command: - /bin/sh @@ -70,7 +70,7 @@ spec: spec: containers: - name: promtail - image: docker.io/grafana/promtail:2.6.1 + image: docker.io/grafana/promtail:2.7.3 args: - -config.file=/etc/promtail/promtail.yaml - -log.level=info diff --git a/operator/hack/operatorhub.sh b/operator/hack/operatorhub.sh new file mode 100755 index 000000000000..3535bb06e380 --- /dev/null +++ b/operator/hack/operatorhub.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +set -e -u -o pipefail + +COMMUNITY_OPERATORS_REPOSITORY="k8s-operatorhub/community-operators" +UPSTREAM_REPOSITORY="redhat-openshift-ecosystem/community-operators-prod" +LOCAL_REPOSITORIES_PATH=${LOCAL_REPOSITORIES_PATH:-"$(dirname "$(dirname "$(dirname "$(pwd)")")")"} + +if [[ ! -d "${LOCAL_REPOSITORIES_PATH}/${COMMUNITY_OPERATORS_REPOSITORY}" ]]; then + echo "${LOCAL_REPOSITORIES_PATH}/${COMMUNITY_OPERATORS_REPOSITORY} doesn't exist, aborting." + exit 1 +fi + +if [[ ! -d "${LOCAL_REPOSITORIES_PATH}/${UPSTREAM_REPOSITORY}" ]]; then + echo "${LOCAL_REPOSITORIES_PATH}/${UPSTREAM_REPOSITORY} doesn't exist, aborting." + exit 1 +fi + +SOURCE_DIR=$(pwd) +VERSION=$(grep "VERSION ?= " Makefile | awk -F= '{print $3}' | xargs) + +for dest in ${COMMUNITY_OPERATORS_REPOSITORY} ${UPSTREAM_REPOSITORY}; do + ( + cd "${LOCAL_REPOSITORIES_PATH}/${dest}" || exit + + if ! git remote | grep upstream > /dev/null; + then + echo "Cannot find a remote named 'upstream'. Adding one." + git remote add upstream "git@github.com:${dest}.git" + fi + + git fetch -q upstream + git checkout -q main + git rebase -q upstream/main + + mkdir -p "operators/loki-operator/${VERSION}" + cp -r "${SOURCE_DIR}/bundle/community"/* "operators/loki-operator/${VERSION}/" + rm "operators/loki-operator/${VERSION}/bundle.Dockerfile" + + if [[ "${dest}" = "${UPSTREAM_REPOSITORY}" ]]; then + python3 - << END +import os, yaml +with open("./operators/loki-operator/${VERSION}/metadata/annotations.yaml", 'r') as f: + y=yaml.safe_load(f) or {} + y['annotations']['com.redhat.openshift.versions'] = os.getenv('SUPPORTED_OCP_VERSIONS') +with open("./operators/loki-operator/${VERSION}/metadata/annotations.yaml", 'w') as f: + yaml.dump(y, f) +END + fi + + if ! git checkout -q -b "update-loki-operator-to-${VERSION}"; + then + echo "Cannot switch to the new branch update-loki-operator-${dest}-to-${VERSION}. 
Aborting" + exit 1 + fi + + git add . + git commit -sqm "Update loki-operator to v${VERSION}" + + if ! command -v gh > /dev/null; + then + echo "'gh' command not found, can't submit the PR on your behalf." + exit 0 + fi + + echo "Submitting PR on your behalf via 'gh'" + gh pr create --title "Update loki-operator to v${VERSION}" --body-file "${SOURCE_DIR}/hack/.checked-pr-template.md" + ) +done + +echo "Completed." diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap.go b/operator/internal/handlers/internal/gateway/tenant_configsecret.go similarity index 73% rename from operator/internal/handlers/internal/gateway/tenant_configmap.go rename to operator/internal/handlers/internal/gateway/tenant_configsecret.go index cdbe167329c3..b8af0bb4905e 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configmap.go +++ b/operator/internal/handlers/internal/gateway/tenant_configsecret.go @@ -35,22 +35,22 @@ type openShiftSpec struct { CookieSecret string `json:"cookieSecret"` } -// GetTenantConfigMapData returns the tenantName, tenantId, cookieSecret +// GetTenantConfigSecretData returns the tenantName, tenantId, cookieSecret // clusters to auto-create redirect URLs for OpenShift Auth or an error. -func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) (map[string]manifests.TenantConfig, error) { - var tenantConfigMap corev1.ConfigMap +func GetTenantConfigSecretData(ctx context.Context, k k8s.Client, req ctrl.Request) (map[string]manifests.TenantConfig, error) { + var tenantSecret corev1.Secret key := client.ObjectKey{Name: manifests.GatewayName(req.Name), Namespace: req.Namespace} - if err := k.Get(ctx, key, &tenantConfigMap); err != nil { - return nil, kverrors.Wrap(err, "couldn't find tenant configMap.") + if err := k.Get(ctx, key, &tenantSecret); err != nil { + return nil, kverrors.Wrap(err, "couldn't find tenant secret.") } - tcm, err := extractTenantConfigMap(&tenantConfigMap) + ts, err := extractTenantConfigMap(&tenantSecret) if err != nil { - return nil, kverrors.Wrap(err, "error occurred in extracting tenants.yaml configMap.") + return nil, kverrors.Wrap(err, "error occurred in extracting tenants.yaml secret.") } - tcmMap := make(map[string]manifests.TenantConfig) - for _, tenant := range tcm.Tenants { + tsMap := make(map[string]manifests.TenantConfig) + for _, tenant := range ts.Tenants { tc := manifests.TenantConfig{} if tenant.OpenShift != nil { tc.OpenShift = &manifests.TenantOpenShiftSpec{ @@ -58,17 +58,17 @@ func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) } } - tcmMap[tenant.Name] = tc + tsMap[tenant.Name] = tc } - return tcmMap, nil + return tsMap, nil } // extractTenantConfigMap extracts tenants.yaml data if valid. // This is to be used to configure tenant's authentication spec when exists. 
-func extractTenantConfigMap(cm *corev1.ConfigMap) (*tenantsConfigJSON, error) { +func extractTenantConfigMap(s *corev1.Secret) (*tenantsConfigJSON, error) { // Extract required fields from tenants.yaml - tenantConfigYAML, ok := cm.BinaryData[LokiGatewayTenantFileName] + tenantConfigYAML, ok := s.Data[LokiGatewayTenantFileName] if !ok { return nil, kverrors.New("missing tenants.yaml file in configMap.") } diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go similarity index 87% rename from operator/internal/handlers/internal/gateway/tenant_configmap_test.go rename to operator/internal/handlers/internal/gateway/tenant_configsecret_test.go index 3349296ab134..b6783aa15ca9 100644 --- a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go +++ b/operator/internal/handlers/internal/gateway/tenant_configsecret_test.go @@ -36,7 +36,7 @@ tenants: cookieSecret: test789 `) -func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { +func TestGetTenantConfigSecretData_SecretExist(t *testing.T) { k := &k8sfakes.FakeClient{} r := ctrl.Request{ NamespacedName: types.NamespacedName{ @@ -47,12 +47,12 @@ func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { if name.Name == "lokistack-dev-gateway" && name.Namespace == "some-ns" { - k.SetClientObject(object, &corev1.ConfigMap{ + k.SetClientObject(object, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "lokistack-dev-gateway", Namespace: "some-ns", }, - BinaryData: map[string][]byte{ + Data: map[string][]byte{ "tenants.yaml": tenantConfigData, }, }) @@ -60,7 +60,7 @@ func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { return nil } - ts, err := GetTenantConfigMapData(context.TODO(), k, r) + ts, err := GetTenantConfigSecretData(context.TODO(), k, r) require.NotNil(t, ts) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) { require.Equal(t, expected, ts) } -func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) { +func TestGetTenantConfigSecretData_SecretNotExist(t *testing.T) { k := &k8sfakes.FakeClient{} r := ctrl.Request{ NamespacedName: types.NamespacedName{ @@ -97,7 +97,7 @@ func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) { return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") } - ts, err := GetTenantConfigMapData(context.TODO(), k, r) + ts, err := GetTenantConfigSecretData(context.TODO(), k, r) require.Nil(t, ts) require.Error(t, err) } diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go index 53f1f7cd0107..477008aa1827 100644 --- a/operator/internal/handlers/lokistack_create_or_update.go +++ b/operator/internal/handlers/lokistack_create_or_update.go @@ -190,9 +190,9 @@ func CreateOrUpdateLokiStack( } // extract the existing tenant's id, cookieSecret if exists, otherwise create new. 
- tenantConfigs, err = gateway.GetTenantConfigMapData(ctx, k, req) + tenantConfigs, err = gateway.GetTenantConfigSecretData(ctx, k, req) if err != nil { - ll.Error(err, "error in getting tenant config map data") + ll.Error(err, "error in getting tenant secret data") } } diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go index 42e9be00955a..08588ca22088 100644 --- a/operator/internal/manifests/gateway.go +++ b/operator/internal/manifests/gateway.go @@ -30,7 +30,7 @@ var logsEndpointRe = regexp.MustCompile(`^--logs\.(?:read|tail|write|rules)\.end // BuildGateway returns a list of k8s objects for Loki Stack Gateway func BuildGateway(opts Options) ([]client.Object, error) { - cm, sha1C, err := gatewayConfigMap(opts) + cm, tenantSecret, sha1C, err := gatewayConfigObjs(opts) if err != nil { return nil, err } @@ -45,7 +45,7 @@ func BuildGateway(opts Options) ([]client.Object, error) { return nil, err } - objs := []client.Object{cm, dpl, sa, saToken, svc, ing} + objs := []client.Object{cm, tenantSecret, dpl, sa, saToken, svc, ing} minTLSVersion := opts.TLSProfile.MinTLSVersion ciphersList := opts.TLSProfile.Ciphers @@ -104,10 +104,8 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment { { Name: "tenants", VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: GatewayName(opts.Name), - }, + Secret: &corev1.SecretVolumeSource{ + SecretName: GatewayName(opts.Name), }, }, }, @@ -349,36 +347,49 @@ func NewServiceAccountTokenSecret(opts Options) client.Object { } } -// gatewayConfigMap creates a configMap for rbac.yaml and tenants.yaml -func gatewayConfigMap(opt Options) (*corev1.ConfigMap, string, error) { +// gatewayConfigObjs creates a configMap for rbac.yaml and a secret for tenants.yaml +func gatewayConfigObjs(opt Options) (*corev1.ConfigMap, *corev1.Secret, string, error) { cfg := gatewayConfigOptions(opt) rbacConfig, tenantsConfig, regoConfig, err := gateway.Build(cfg) if err != nil { - return nil, "", err + return nil, nil, "", err } s := sha1.New() _, err = s.Write(tenantsConfig) if err != nil { - return nil, "", err + return nil, nil, "", err } sha1C := fmt.Sprintf("%x", s.Sum(nil)) return &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - Kind: "ConfigMap", - APIVersion: corev1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: GatewayName(opt.Name), - Labels: commonLabels(opt.Name), - }, - BinaryData: map[string][]byte{ - gateway.LokiGatewayRbacFileName: rbacConfig, - gateway.LokiGatewayTenantFileName: tenantsConfig, - gateway.LokiGatewayRegoFileName: regoConfig, - }, - }, sha1C, nil + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: GatewayName(opt.Name), + Labels: commonLabels(opt.Name), + }, + BinaryData: map[string][]byte{ + gateway.LokiGatewayRbacFileName: rbacConfig, + gateway.LokiGatewayRegoFileName: regoConfig, + }, + }, &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: corev1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: GatewayName(opt.Name), + Labels: ComponentLabels(LabelGatewayComponent, opt.Name), + Namespace: opt.Namespace, + }, + Data: map[string][]byte{ + gateway.LokiGatewayTenantFileName: tenantsConfig, + }, + Type: corev1.SecretTypeOpaque, + }, sha1C, nil } // gatewayConfigOptions converts Options to gateway.Options diff --git 
a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go index f23317fbece0..9d6b08ed3e7b 100644 --- a/operator/internal/manifests/gateway_test.go +++ b/operator/internal/manifests/gateway_test.go @@ -152,7 +152,7 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) { }, } - _, sha1C, err := gatewayConfigMap(opts) + _, _, sha1C, err := gatewayConfigObjs(opts) require.NoError(t, err) require.NotEmpty(t, sha1C) } @@ -178,7 +178,7 @@ func TestBuildGateway_HasConfigForTenantMode(t *testing.T) { require.NoError(t, err) - d, ok := objs[1].(*appsv1.Deployment) + d, ok := objs[2].(*appsv1.Deployment) require.True(t, ok) require.Len(t, d.Spec.Template.Spec.Containers, 2) } @@ -210,7 +210,7 @@ func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) { }) require.NoError(t, err) - require.Len(t, objs, 11) + require.Len(t, objs, 12) } func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T) { @@ -243,8 +243,8 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T require.NoError(t, err) - svc := objs[4].(*corev1.Service) - rt := objs[5].(*routev1.Route) + svc := objs[5].(*corev1.Service) + rt := objs[6].(*routev1.Route) require.Equal(t, svc.Kind, rt.Spec.To.Kind) require.Equal(t, svc.Name, rt.Spec.To.Name) require.Equal(t, svc.Spec.Ports[0].Name, rt.Spec.Port.TargetPort.StrVal) @@ -280,8 +280,8 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ServiceAccountNameMatches(t require.NoError(t, err) - dpl := objs[1].(*appsv1.Deployment) - sa := objs[2].(*corev1.ServiceAccount) + dpl := objs[2].(*appsv1.Deployment) + sa := objs[3].(*corev1.ServiceAccount) require.Equal(t, dpl.Spec.Template.Spec.ServiceAccountName, sa.Name) } @@ -450,7 +450,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) { objs, err := BuildGateway(tc.options) require.NoError(t, err) - d, ok := objs[1].(*appsv1.Deployment) + d, ok := objs[2].(*appsv1.Deployment) require.True(t, ok) for _, arg := range tc.expectedArgs { @@ -659,7 +659,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) { objs, err := BuildGateway(tc.opts) require.NoError(t, err) - d, ok := objs[1].(*appsv1.Deployment) + d, ok := objs[2].(*appsv1.Deployment) require.True(t, ok) for _, arg := range tc.wantArgs { @@ -702,7 +702,7 @@ func TestBuildGateway_WithHTTPEncryption(t *testing.T) { require.NoError(t, err) - dpl := objs[1].(*appsv1.Deployment) + dpl := objs[2].(*appsv1.Deployment) require.NotNil(t, dpl) require.Len(t, dpl.Spec.Template.Spec.Containers, 1) @@ -793,10 +793,8 @@ func TestBuildGateway_WithHTTPEncryption(t *testing.T) { { Name: "tenants", VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "abcd-gateway", - }, + Secret: &corev1.SecretVolumeSource{ + SecretName: "abcd-gateway", }, }, }, diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go index 3a6991e62972..99e52891f21d 100644 --- a/operator/internal/manifests/internal/config/build_test.go +++ b/operator/internal/manifests/internal/config/build_test.go @@ -115,7 +115,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -194,8 +193,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: 
"loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -351,7 +350,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -461,8 +459,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -535,8 +533,8 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) { Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -691,7 +689,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -824,8 +821,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -1028,7 +1025,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -1161,8 +1157,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -1366,7 +1362,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -1512,8 +1507,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -1742,7 +1737,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -1904,8 +1898,8 @@ overrides: Port: 9095, }, Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, StorageDirectory: "/tmp/loki", MaxConcurrent: MaxConcurrent{ @@ -2047,7 +2041,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -2206,8 +2199,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, 
FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -2466,7 +2459,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -2613,8 +2605,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", @@ -2770,7 +2762,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s max_concurrent: 2 query_ingesters_within: 3h @@ -2957,8 +2948,8 @@ overrides: Namespace: "test-ns", Name: "test", Compactor: Address{ - FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", - Port: 9095, + FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local", + Port: 9095, }, FrontendWorker: Address{ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local", diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml index 265ad87ccccc..b2836448cf4e 100644 --- a/operator/internal/manifests/internal/config/loki-config.yaml +++ b/operator/internal/manifests/internal/config/loki-config.yaml @@ -175,7 +175,6 @@ memberlist: querier: engine: max_look_back_period: 30s - timeout: 3m extra_query_delay: 0s query_ingesters_within: 3h tail_max_duration: 1h diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go index 88aa51c59431..75566d839d5e 100644 --- a/operator/internal/manifests/internal/sizes.go +++ b/operator/internal/manifests/internal/sizes.go @@ -223,7 +223,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxEntriesLimitPerQuery: 5000, MaxChunksPerQuery: 2000000, MaxQuerySeries: 500, - QueryTimeout: "1m", + QueryTimeout: "3m", }, }, }, @@ -276,7 +276,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxEntriesLimitPerQuery: 5000, MaxChunksPerQuery: 2000000, MaxQuerySeries: 500, - QueryTimeout: "1m", + QueryTimeout: "3m", }, }, }, @@ -329,7 +329,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{ MaxEntriesLimitPerQuery: 5000, MaxChunksPerQuery: 2000000, MaxQuerySeries: 500, - QueryTimeout: "1m", + QueryTimeout: "3m", }, }, }, diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go index 032f6d3e71fe..a1bf2853ecba 100644 --- a/operator/internal/status/components.go +++ b/operator/internal/status/components.go @@ -9,64 +9,54 @@ import ( "github.com/grafana/loki/operator/internal/manifests" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) -// SetComponentsStatus updates the pod status map component -func SetComponentsStatus(ctx context.Context, k k8s.Client, req ctrl.Request) error { - var s lokiv1.LokiStack - if err := k.Get(ctx, req.NamespacedName, &s); err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) - } - +// generateComponentStatus updates the pod status map component +func generateComponentStatus(ctx context.Context, k k8s.Client, s *lokiv1.LokiStack) 
(*lokiv1.LokiStackComponentStatus, error) { var err error - s.Status.Components = lokiv1.LokiStackComponentStatus{} - s.Status.Components.Compactor, err = appendPodStatus(ctx, k, manifests.LabelCompactorComponent, s.Name, s.Namespace) + result := &lokiv1.LokiStackComponentStatus{} + result.Compactor, err = appendPodStatus(ctx, k, manifests.LabelCompactorComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelCompactorComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelCompactorComponent) } - s.Status.Components.Querier, err = appendPodStatus(ctx, k, manifests.LabelQuerierComponent, s.Name, s.Namespace) + result.Querier, err = appendPodStatus(ctx, k, manifests.LabelQuerierComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQuerierComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQuerierComponent) } - s.Status.Components.Distributor, err = appendPodStatus(ctx, k, manifests.LabelDistributorComponent, s.Name, s.Namespace) + result.Distributor, err = appendPodStatus(ctx, k, manifests.LabelDistributorComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelDistributorComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelDistributorComponent) } - s.Status.Components.QueryFrontend, err = appendPodStatus(ctx, k, manifests.LabelQueryFrontendComponent, s.Name, s.Namespace) + result.QueryFrontend, err = appendPodStatus(ctx, k, manifests.LabelQueryFrontendComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQueryFrontendComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelQueryFrontendComponent) } - s.Status.Components.IndexGateway, err = appendPodStatus(ctx, k, manifests.LabelIndexGatewayComponent, s.Name, s.Namespace) + result.IndexGateway, err = appendPodStatus(ctx, k, manifests.LabelIndexGatewayComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIngesterComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIngesterComponent) } - s.Status.Components.Ingester, err = appendPodStatus(ctx, k, manifests.LabelIngesterComponent, s.Name, s.Namespace) + result.Ingester, err = appendPodStatus(ctx, k, manifests.LabelIngesterComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIndexGatewayComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelIndexGatewayComponent) } - s.Status.Components.Gateway, err = appendPodStatus(ctx, k, manifests.LabelGatewayComponent, s.Name, s.Namespace) + result.Gateway, err = appendPodStatus(ctx, k, manifests.LabelGatewayComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelGatewayComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods 
status", "name", manifests.LabelGatewayComponent) } - s.Status.Components.Ruler, err = appendPodStatus(ctx, k, manifests.LabelRulerComponent, s.Name, s.Namespace) + result.Ruler, err = appendPodStatus(ctx, k, manifests.LabelRulerComponent, s.Name, s.Namespace) if err != nil { - return kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelRulerComponent) + return nil, kverrors.Wrap(err, "failed lookup LokiStack component pods status", "name", manifests.LabelRulerComponent) } - return k.Status().Update(ctx, &s, &client.UpdateOptions{}) + return result, nil } func appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns string) (lokiv1.PodStatusMap, error) { diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go index dd3f05c4e010..3e0987fbc871 100644 --- a/operator/internal/status/components_test.go +++ b/operator/internal/status/components_test.go @@ -1,316 +1,137 @@ -package status_test +package status import ( "context" + "fmt" "testing" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" - "github.com/grafana/loki/operator/internal/status" + "github.com/grafana/loki/operator/internal/manifests" "github.com/stretchr/testify/require" - - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestSetComponentsStatus_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { - k := &k8sfakes.FakeClient{} - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewBadRequest("something wasn't found") - } - - err := status.SetComponentsStatus(context.TODO(), k, r) - require.Error(t, err) -} - -func TestSetComponentsStatus_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { - k := &k8sfakes.FakeClient{} - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, +func createPodList(baseName string, phases ...corev1.PodPhase) *corev1.PodList { + items := []corev1.Pod{} + for i, p := range phases { + items = append(items, corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-pod-%d", baseName, i), + }, + Status: corev1.PodStatus{ + Phase: p, + }, + }) } - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") + return &corev1.PodList{ + Items: items, } - - err := status.SetComponentsStatus(context.TODO(), k, r) - require.NoError(t, err) } -func TestSetComponentsStatus_WhenListReturnError_ReturnError(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - - k.StatusStub = func() client.StatusWriter { return sw } - - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ 
...client.GetOption) error { - if r.Name == name.Name && r.Namespace == name.Namespace { - k.SetClientObject(object, &s) - return nil +func setupListClient(t *testing.T, stack *lokiv1.LokiStack, componentPods map[string]*corev1.PodList) (*k8sfakes.FakeClient, *k8sfakes.FakeStatusWriter) { + k, sw := setupFakesNoError(t, stack) + k.ListStub = func(_ context.Context, list client.ObjectList, options ...client.ListOption) error { + componentLabel := "" + for _, o := range options { + if m, ok := o.(client.MatchingLabels); ok { + componentLabel = m["app.kubernetes.io/component"] + } } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - k.ListStub = func(_ context.Context, l client.ObjectList, opts ...client.ListOption) error { - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - err := status.SetComponentsStatus(context.TODO(), k, r) - require.Error(t, err) -} - -func TestSetComponentsStatus_WhenPodListExisting_SetPodStatusMap(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - k.StatusStub = func() client.StatusWriter { return sw } - - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if r.Name == name.Name && r.Namespace == name.Namespace { - k.SetClientObject(object, &s) - return nil + if componentLabel == "" { + t.Fatalf("no component label on list call: %s", options) } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - k.ListStub = func(_ context.Context, l client.ObjectList, _ ...client.ListOption) error { - pods := v1.PodList{ - Items: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-a", - }, - Status: v1.PodStatus{ - Phase: v1.PodPending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-b", - }, - Status: v1.PodStatus{ - Phase: v1.PodRunning, - }, - }, - }, + podList, ok := componentPods[componentLabel] + if !ok { + t.Fatalf("no pods found for label: %s", componentLabel) } - k.SetClientObjectList(l, &pods) - return nil - } - - expected := lokiv1.PodStatusMap{ - "Pending": []string{"pod-a"}, - "Running": []string{"pod-b"}, - } - sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { - stack := obj.(*lokiv1.LokiStack) - require.Equal(t, expected, stack.Status.Components.Compactor) + k.SetClientObjectList(list, podList) return nil } - err := status.SetComponentsStatus(context.TODO(), k, r) - require.NoError(t, err) - require.NotZero(t, k.ListCallCount()) - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) + return k, sw } -func TestSetComponentsStatus_WhenRulerEnabled_SetPodStatusMap(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - - k.StatusStub = func() client.StatusWriter { return sw } - - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Spec: lokiv1.LokiStackSpec{ - Rules: &lokiv1.RulesSpec{ - Enabled: true, +func TestGenerateComponentStatus(t *testing.T) { + tt := []struct { + desc string + componentPods map[string]*corev1.PodList + wantComponentStatus *lokiv1.LokiStackComponentStatus + }{ + { + desc: "no pods", + componentPods: map[string]*corev1.PodList{ + 
manifests.LabelCompactorComponent: {}, + manifests.LabelDistributorComponent: {}, + manifests.LabelIngesterComponent: {}, + manifests.LabelQuerierComponent: {}, + manifests.LabelQueryFrontendComponent: {}, + manifests.LabelIndexGatewayComponent: {}, + manifests.LabelRulerComponent: {}, + manifests.LabelGatewayComponent: {}, }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if r.Name == name.Name && r.Namespace == name.Namespace { - k.SetClientObject(object, &s) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - k.ListStub = func(_ context.Context, l client.ObjectList, _ ...client.ListOption) error { - pods := v1.PodList{ - Items: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-a", - }, - Status: v1.PodStatus{ - Phase: v1.PodPending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-b", - }, - Status: v1.PodStatus{ - Phase: v1.PodRunning, - }, - }, + wantComponentStatus: &lokiv1.LokiStackComponentStatus{ + Compactor: map[corev1.PodPhase][]string{}, + Distributor: map[corev1.PodPhase][]string{}, + IndexGateway: map[corev1.PodPhase][]string{}, + Ingester: map[corev1.PodPhase][]string{}, + Querier: map[corev1.PodPhase][]string{}, + QueryFrontend: map[corev1.PodPhase][]string{}, + Gateway: map[corev1.PodPhase][]string{}, + Ruler: map[corev1.PodPhase][]string{}, }, - } - k.SetClientObjectList(l, &pods) - return nil - } - - expected := lokiv1.PodStatusMap{ - "Pending": []string{"pod-a"}, - "Running": []string{"pod-b"}, - } - - sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { - stack := obj.(*lokiv1.LokiStack) - require.Equal(t, expected, stack.Status.Components.Ruler) - return nil - } - - err := status.SetComponentsStatus(context.TODO(), k, r) - require.NoError(t, err) - require.NotZero(t, k.ListCallCount()) - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetComponentsStatus_WhenRulerNotEnabled_DoNothing(t *testing.T) { - sw := &k8sfakes.FakeStatusWriter{} - k := &k8sfakes.FakeClient{} - - k.StatusStub = func() client.StatusWriter { return sw } - - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", }, - Spec: lokiv1.LokiStackSpec{ - Rules: &lokiv1.RulesSpec{ - Enabled: false, + { + desc: "all one pod running", + componentPods: map[string]*corev1.PodList{ + manifests.LabelCompactorComponent: createPodList(manifests.LabelCompactorComponent, corev1.PodRunning), + manifests.LabelDistributorComponent: createPodList(manifests.LabelDistributorComponent, corev1.PodRunning), + manifests.LabelIngesterComponent: createPodList(manifests.LabelIngesterComponent, corev1.PodRunning), + manifests.LabelQuerierComponent: createPodList(manifests.LabelQuerierComponent, corev1.PodRunning), + manifests.LabelQueryFrontendComponent: createPodList(manifests.LabelQueryFrontendComponent, corev1.PodRunning), + manifests.LabelIndexGatewayComponent: createPodList(manifests.LabelIndexGatewayComponent, corev1.PodRunning), + manifests.LabelRulerComponent: createPodList(manifests.LabelRulerComponent, corev1.PodRunning), + manifests.LabelGatewayComponent: createPodList(manifests.LabelGatewayComponent, corev1.PodRunning), + }, + wantComponentStatus: &lokiv1.LokiStackComponentStatus{ + Compactor: 
map[corev1.PodPhase][]string{corev1.PodRunning: {"compactor-pod-0"}}, + Distributor: map[corev1.PodPhase][]string{corev1.PodRunning: {"distributor-pod-0"}}, + IndexGateway: map[corev1.PodPhase][]string{corev1.PodRunning: {"index-gateway-pod-0"}}, + Ingester: map[corev1.PodPhase][]string{corev1.PodRunning: {"ingester-pod-0"}}, + Querier: map[corev1.PodPhase][]string{corev1.PodRunning: {"querier-pod-0"}}, + QueryFrontend: map[corev1.PodPhase][]string{corev1.PodRunning: {"query-frontend-pod-0"}}, + Gateway: map[corev1.PodPhase][]string{corev1.PodRunning: {"lokistack-gateway-pod-0"}}, + Ruler: map[corev1.PodPhase][]string{corev1.PodRunning: {"ruler-pod-0"}}, }, }, } - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - if r.Name == name.Name && r.Namespace == name.Namespace { - k.SetClientObject(object, &s) - return nil - } - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } + for _, tc := range tt { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() - k.ListStub = func(_ context.Context, l client.ObjectList, o ...client.ListOption) error { - s := o[0].(client.MatchingLabels) + stack := &lokiv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } - c, ok := s["app.kubernetes.io/component"] - if !ok || c == "ruler" { - return nil - } + k, _ := setupListClient(t, stack, tc.componentPods) - pods := v1.PodList{ - Items: []v1.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-a", - }, - Status: v1.PodStatus{ - Phase: v1.PodPending, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-b", - }, - Status: v1.PodStatus{ - Phase: v1.PodRunning, - }, - }, - }, - } - k.SetClientObjectList(l, &pods) - return nil - } + componentStatus, err := generateComponentStatus(context.Background(), k, stack) + require.NoError(t, err) + require.Equal(t, tc.wantComponentStatus, componentStatus) - sw.UpdateStub = func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { - stack := obj.(*lokiv1.LokiStack) - require.Equal(t, stack.Status.Components.Ruler, lokiv1.PodStatusMap{}) - return nil + // one list call for each component + require.Equal(t, 8, k.ListCallCount()) + }) } - - err := status.SetComponentsStatus(context.TODO(), k, r) - require.NoError(t, err) - require.NotZero(t, k.ListCallCount()) - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) } diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go index f0d06133720d..1a34d7865831 100644 --- a/operator/internal/status/lokistack.go +++ b/operator/internal/status/lokistack.go @@ -7,6 +7,7 @@ import ( "github.com/ViaQ/logerr/v2/kverrors" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/util/retry" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -20,51 +21,33 @@ const ( messagePending = "Some LokiStack components pending on dependencies" ) -// DegradedError contains information about why the managed LokiStack has an invalid configuration. 
-type DegradedError struct { - Message string - Reason lokiv1.LokiStackConditionReason - Requeue bool -} - -func (e *DegradedError) Error() string { - return fmt.Sprintf("cluster degraded: %s", e.Message) -} - -// SetReadyCondition updates or appends the condition Ready to the lokistack status conditions. -// In addition it resets all other Status conditions to false. -func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { - ready := metav1.Condition{ - Type: string(lokiv1.ConditionReady), - Message: messageReady, - Reason: string(lokiv1.ReasonReadyComponents), - } - - return updateCondition(ctx, k, req, ready) -} - -// SetFailedCondition updates or appends the condition Failed to the lokistack status conditions. -// In addition it resets all other Status conditions to false. -func SetFailedCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { - failed := metav1.Condition{ +var ( + conditionFailed = metav1.Condition{ Type: string(lokiv1.ConditionFailed), Message: messageFailed, Reason: string(lokiv1.ReasonFailedComponents), } - - return updateCondition(ctx, k, req, failed) -} - -// SetPendingCondition updates or appends the condition Pending to the lokistack status conditions. -// In addition it resets all other Status conditions to false. -func SetPendingCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error { - pending := metav1.Condition{ + conditionPending = metav1.Condition{ Type: string(lokiv1.ConditionPending), Message: messagePending, Reason: string(lokiv1.ReasonPendingComponents), } + conditionReady = metav1.Condition{ + Type: string(lokiv1.ConditionReady), + Message: messageReady, + Reason: string(lokiv1.ReasonReadyComponents), + } +) + +// DegradedError contains information about why the managed LokiStack has an invalid configuration. +type DegradedError struct { + Message string + Reason lokiv1.LokiStackConditionReason + Requeue bool +} - return updateCondition(ctx, k, req, pending) +func (e *DegradedError) Error() string { + return fmt.Sprintf("cluster degraded: %s", e.Message) } // SetDegradedCondition appends the condition Degraded to the lokistack status conditions. 
@@ -78,6 +61,38 @@ func SetDegradedCondition(ctx context.Context, k k8s.Client, req ctrl.Request, m return updateCondition(ctx, k, req, degraded) } +func generateCondition(cs *lokiv1.LokiStackComponentStatus) metav1.Condition { + // Check for failed pods first + failed := len(cs.Compactor[corev1.PodFailed]) + + len(cs.Distributor[corev1.PodFailed]) + + len(cs.Ingester[corev1.PodFailed]) + + len(cs.Querier[corev1.PodFailed]) + + len(cs.QueryFrontend[corev1.PodFailed]) + + len(cs.Gateway[corev1.PodFailed]) + + len(cs.IndexGateway[corev1.PodFailed]) + + len(cs.Ruler[corev1.PodFailed]) + + if failed != 0 { + return conditionFailed + } + + // Check for pending pods + pending := len(cs.Compactor[corev1.PodPending]) + + len(cs.Distributor[corev1.PodPending]) + + len(cs.Ingester[corev1.PodPending]) + + len(cs.Querier[corev1.PodPending]) + + len(cs.QueryFrontend[corev1.PodPending]) + + len(cs.Gateway[corev1.PodPending]) + + len(cs.IndexGateway[corev1.PodPending]) + + len(cs.Ruler[corev1.PodPending]) + + if pending != 0 { + return conditionPending + } + + return conditionReady +} + func updateCondition(ctx context.Context, k k8s.Client, req ctrl.Request, condition metav1.Condition) error { var stack lokiv1.LokiStack if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { diff --git a/operator/internal/status/lokistack_test.go b/operator/internal/status/lokistack_test.go index 4208cd9c2dea..2e2c64a6a1ae 100644 --- a/operator/internal/status/lokistack_test.go +++ b/operator/internal/status/lokistack_test.go @@ -6,9 +6,8 @@ import ( lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes" - "github.com/stretchr/testify/require" - + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,391 +38,6 @@ func setupFakesNoError(t *testing.T, stack *lokiv1.LokiStack) (*k8sfakes.FakeCli return k, sw } -func TestSetReadyCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewBadRequest("something wasn't found") - } - - err := SetReadyCondition(context.Background(), k, r) - require.Error(t, err) -} - -func TestSetReadyCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - err := SetReadyCondition(context.Background(), k, r) - require.NoError(t, err) -} - -func TestSetReadyCondition_WhenExisting_DoNothing(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionReady), - Message: messageReady, - Reason: string(lokiv1.ReasonReadyComponents), - Status: metav1.ConditionTrue, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, _ := 
setupFakesNoError(t, &s) - - err := SetReadyCondition(context.Background(), k, r) - require.NoError(t, err) - require.Zero(t, k.StatusCallCount()) -} - -func TestSetReadyCondition_WhenExisting_SetReadyConditionTrue(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionReady), - Status: metav1.ConditionFalse, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := setupFakesNoError(t, &s) - - err := SetReadyCondition(context.Background(), k, r) - require.NoError(t, err) - - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetReadyCondition_WhenNoneExisting_AppendReadyCondition(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := setupFakesNoError(t, &s) - - err := SetReadyCondition(context.Background(), k, r) - require.NoError(t, err) - - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetFailedCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewBadRequest("something wasn't found") - } - - err := SetFailedCondition(context.Background(), k, r) - require.Error(t, err) -} - -func TestSetFailedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - err := SetFailedCondition(context.Background(), k, r) - require.NoError(t, err) -} - -func TestSetFailedCondition_WhenExisting_DoNothing(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionFailed), - Reason: string(lokiv1.ReasonFailedComponents), - Message: messageFailed, - Status: metav1.ConditionTrue, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, _ := setupFakesNoError(t, &s) - - err := SetFailedCondition(context.Background(), k, r) - require.NoError(t, err) - require.Zero(t, k.StatusCallCount()) -} - -func TestSetFailedCondition_WhenExisting_SetFailedConditionTrue(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionFailed), - Status: metav1.ConditionFalse, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := 
setupFakesNoError(t, &s) - - err := SetFailedCondition(context.Background(), k, r) - require.NoError(t, err) - - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetFailedCondition_WhenNoneExisting_AppendFailedCondition(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := setupFakesNoError(t, &s) - - err := SetFailedCondition(context.Background(), k, r) - require.NoError(t, err) - - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetDegradedCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { - msg := "tell me nothing" - reason := lokiv1.ReasonMissingObjectStorageSecret - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewBadRequest("something wasn't found") - } - - err := SetDegradedCondition(context.Background(), k, r, msg, reason) - require.Error(t, err) -} - -func TestSetPendingCondition_WhenGetLokiStackReturnsError_ReturnError(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewBadRequest("something wasn't found") - } - - err := SetPendingCondition(context.Background(), k, r) - require.Error(t, err) -} - -func TestSetPendingCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k := &k8sfakes.FakeClient{} - k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error { - return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found") - } - - err := SetPendingCondition(context.Background(), k, r) - require.NoError(t, err) -} - -func TestSetPendingCondition_WhenExisting_DoNothing(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionPending), - Reason: string(lokiv1.ReasonPendingComponents), - Message: messagePending, - Status: metav1.ConditionTrue, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, _ := setupFakesNoError(t, &s) - - err := SetPendingCondition(context.Background(), k, r) - require.NoError(t, err) - require.Zero(t, k.StatusCallCount()) -} - -func TestSetPendingCondition_WhenExisting_SetPendingConditionTrue(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - Status: lokiv1.LokiStackStatus{ - Conditions: []metav1.Condition{ - { - Type: string(lokiv1.ConditionPending), - Status: metav1.ConditionFalse, - }, - }, - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := 
setupFakesNoError(t, &s) - - err := SetPendingCondition(context.Background(), k, r) - require.NoError(t, err) - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - -func TestSetPendingCondition_WhenNoneExisting_AppendPendingCondition(t *testing.T) { - s := lokiv1.LokiStack{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - r := ctrl.Request{ - NamespacedName: types.NamespacedName{ - Name: "my-stack", - Namespace: "some-ns", - }, - } - - k, sw := setupFakesNoError(t, &s) - - err := SetPendingCondition(context.Background(), k, r) - require.NoError(t, err) - - require.NotZero(t, k.StatusCallCount()) - require.NotZero(t, sw.UpdateCallCount()) -} - func TestSetDegradedCondition_WhenGetLokiStackReturnsNotFound_DoNothing(t *testing.T) { msg := "tell me nothing" reason := lokiv1.ReasonMissingObjectStorageSecret @@ -537,3 +151,49 @@ func TestSetDegradedCondition_WhenNoneExisting_AppendDegradedCondition(t *testin require.NotZero(t, k.StatusCallCount()) require.NotZero(t, sw.UpdateCallCount()) } + +func TestGenerateConditions(t *testing.T) { + tt := []struct { + desc string + componentStatus *lokiv1.LokiStackComponentStatus + wantCondition metav1.Condition + }{ + { + desc: "no error", + componentStatus: &lokiv1.LokiStackComponentStatus{}, + wantCondition: conditionReady, + }, + { + desc: "container pending", + componentStatus: &lokiv1.LokiStackComponentStatus{ + Ingester: map[corev1.PodPhase][]string{ + corev1.PodPending: { + "pod-0", + }, + }, + }, + wantCondition: conditionPending, + }, + { + desc: "container failed", + componentStatus: &lokiv1.LokiStackComponentStatus{ + Ingester: map[corev1.PodPhase][]string{ + corev1.PodFailed: { + "pod-0", + }, + }, + }, + wantCondition: conditionFailed, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + + condition := generateCondition(tc.componentStatus) + require.Equal(t, tc.wantCondition, condition) + }) + } +} diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go index 247aea4e325a..ca4e7c1bf301 100644 --- a/operator/internal/status/status.go +++ b/operator/internal/status/status.go @@ -2,12 +2,14 @@ package status import ( "context" + "time" "github.com/ViaQ/logerr/v2/kverrors" lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" "github.com/grafana/loki/operator/internal/external/k8s" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" ) @@ -15,56 +17,66 @@ import ( // Refresh executes an aggregate update of the LokiStack Status struct, i.e. // - It recreates the Status.Components pod status map per component. // - It sets the appropriate Status.Condition to true that matches the pod status maps. 
-func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error { - if err := SetComponentsStatus(ctx, k, req); err != nil { - return err - } - - var s lokiv1.LokiStack - if err := k.Get(ctx, req.NamespacedName, &s); err != nil { +func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request, now time.Time) error { + var stack lokiv1.LokiStack + if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { if apierrors.IsNotFound(err) { return nil } return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.NamespacedName) } - cs := s.Status.Components + cs, err := generateComponentStatus(ctx, k, &stack) + if err != nil { + return err + } + + condition := generateCondition(cs) - // Check for failed pods first - failed := len(cs.Compactor[corev1.PodFailed]) + - len(cs.Distributor[corev1.PodFailed]) + - len(cs.Ingester[corev1.PodFailed]) + - len(cs.Querier[corev1.PodFailed]) + - len(cs.QueryFrontend[corev1.PodFailed]) + - len(cs.Gateway[corev1.PodFailed]) + - len(cs.IndexGateway[corev1.PodFailed]) + - len(cs.Ruler[corev1.PodFailed]) + condition.LastTransitionTime = metav1.NewTime(now) + condition.Status = metav1.ConditionTrue - unknown := len(cs.Compactor[corev1.PodUnknown]) + - len(cs.Distributor[corev1.PodUnknown]) + - len(cs.Ingester[corev1.PodUnknown]) + - len(cs.Querier[corev1.PodUnknown]) + - len(cs.QueryFrontend[corev1.PodUnknown]) + - len(cs.Gateway[corev1.PodUnknown]) + - len(cs.IndexGateway[corev1.PodUnknown]) + - len(cs.Ruler[corev1.PodUnknown]) + statusUpdater := func(stack *lokiv1.LokiStack) { + stack.Status.Components = *cs - if failed != 0 || unknown != 0 { - return SetFailedCondition(ctx, k, req) - } + index := -1 + for i := range stack.Status.Conditions { + // Reset all other conditions first + stack.Status.Conditions[i].Status = metav1.ConditionFalse + stack.Status.Conditions[i].LastTransitionTime = metav1.NewTime(now) - // Check for pending pods - pending := len(cs.Compactor[corev1.PodPending]) + - len(cs.Distributor[corev1.PodPending]) + - len(cs.Ingester[corev1.PodPending]) + - len(cs.Querier[corev1.PodPending]) + - len(cs.QueryFrontend[corev1.PodPending]) + - len(cs.Gateway[corev1.PodPending]) + - len(cs.IndexGateway[corev1.PodPending]) + - len(cs.Ruler[corev1.PodPending]) + // Locate existing pending condition if any + if stack.Status.Conditions[i].Type == condition.Type { + index = i + } + } - if pending != 0 { - return SetPendingCondition(ctx, k, req) + if index == -1 { + stack.Status.Conditions = append(stack.Status.Conditions, condition) + } else { + stack.Status.Conditions[index] = condition + } + } + + statusUpdater(&stack) + err = k.Status().Update(ctx, &stack) + switch { + case err == nil: + return nil + case apierrors.IsConflict(err): + // break into retry-logic below on conflict + break + case err != nil: + // return non-conflict errors + return err } - return SetReadyCondition(ctx, k, req) + + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err := k.Get(ctx, req.NamespacedName, &stack); err != nil { + return err + } + + statusUpdater(&stack) + return k.Status().Update(ctx, &stack) + }) } diff --git a/operator/internal/status/status_test.go b/operator/internal/status/status_test.go new file mode 100644 index 000000000000..81ecc15345a4 --- /dev/null +++ b/operator/internal/status/status_test.go @@ -0,0 +1,83 @@ +package status + +import ( + "context" + "testing" + "time" + + lokiv1 "github.com/grafana/loki/operator/apis/loki/v1" + "github.com/grafana/loki/operator/internal/manifests" + 
"github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" +) + +func TestRefreshSuccess(t *testing.T) { + now := time.Now() + stack := &lokiv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "my-stack", + Namespace: "some-ns", + }, + } + + componentPods := map[string]*corev1.PodList{ + manifests.LabelCompactorComponent: createPodList(manifests.LabelCompactorComponent, corev1.PodRunning), + manifests.LabelDistributorComponent: createPodList(manifests.LabelDistributorComponent, corev1.PodRunning), + manifests.LabelIngesterComponent: createPodList(manifests.LabelIngesterComponent, corev1.PodRunning), + manifests.LabelQuerierComponent: createPodList(manifests.LabelQuerierComponent, corev1.PodRunning), + manifests.LabelQueryFrontendComponent: createPodList(manifests.LabelQueryFrontendComponent, corev1.PodRunning), + manifests.LabelIndexGatewayComponent: createPodList(manifests.LabelIndexGatewayComponent, corev1.PodRunning), + manifests.LabelRulerComponent: createPodList(manifests.LabelRulerComponent, corev1.PodRunning), + manifests.LabelGatewayComponent: createPodList(manifests.LabelGatewayComponent, corev1.PodRunning), + } + + wantStatus := lokiv1.LokiStackStatus{ + Components: lokiv1.LokiStackComponentStatus{ + Compactor: map[corev1.PodPhase][]string{corev1.PodRunning: {"compactor-pod-0"}}, + Distributor: map[corev1.PodPhase][]string{corev1.PodRunning: {"distributor-pod-0"}}, + IndexGateway: map[corev1.PodPhase][]string{corev1.PodRunning: {"index-gateway-pod-0"}}, + Ingester: map[corev1.PodPhase][]string{corev1.PodRunning: {"ingester-pod-0"}}, + Querier: map[corev1.PodPhase][]string{corev1.PodRunning: {"querier-pod-0"}}, + QueryFrontend: map[corev1.PodPhase][]string{corev1.PodRunning: {"query-frontend-pod-0"}}, + Gateway: map[corev1.PodPhase][]string{corev1.PodRunning: {"lokistack-gateway-pod-0"}}, + Ruler: map[corev1.PodPhase][]string{corev1.PodRunning: {"ruler-pod-0"}}, + }, + Storage: lokiv1.LokiStackStorageStatus{}, + Conditions: []metav1.Condition{ + { + Type: string(lokiv1.ConditionReady), + Reason: string(lokiv1.ReasonReadyComponents), + Message: messageReady, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(now), + }, + }, + } + + k, sw := setupListClient(t, stack, componentPods) + + err := Refresh(context.Background(), k, req, now) + + require.NoError(t, err) + require.Equal(t, 1, k.GetCallCount()) + require.Equal(t, 8, k.ListCallCount()) + + require.Equal(t, 1, sw.UpdateCallCount()) + _, updated, _ := sw.UpdateArgsForCall(0) + updatedStack, ok := updated.(*lokiv1.LokiStack) + if !ok { + t.Fatalf("not a LokiStack: %T", updatedStack) + } + + require.Equal(t, wantStatus, updatedStack.Status) +} diff --git a/pkg/canary/comparator/comparator.go b/pkg/canary/comparator/comparator.go index f35fc024dd60..de1e50c559d7 100644 --- a/pkg/canary/comparator/comparator.go +++ b/pkg/canary/comparator/comparator.go @@ -249,8 +249,8 @@ func (c *Comparator) run() { t := time.NewTicker(c.pruneInterval) // Use a random tick up to the interval for the first tick firstMt := true - rand.Seed(time.Now().UnixNano()) - mt := time.NewTicker(time.Duration(rand.Int63n(c.metricTestInterval.Nanoseconds()))) + randomGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) + mt := 
time.NewTicker(time.Duration(randomGenerator.Int63n(c.metricTestInterval.Nanoseconds()))) sc := time.NewTicker(c.spotCheckQueryRate) defer func() { t.Stop() diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go index 1485b462d750..0d67a98c1fcd 100644 --- a/pkg/chunkenc/memchunk_test.go +++ b/pkg/chunkenc/memchunk_test.go @@ -68,9 +68,9 @@ func TestBlocksInclusive(t *testing.T) { func TestBlock(t *testing.T) { for _, enc := range testEncoding { + enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() - chk := NewMemChunk(enc, DefaultHeadBlockFmt, testBlockSize, testTargetSize) cases := []struct { ts int64 @@ -180,6 +180,7 @@ func TestBlock(t *testing.T) { func TestCorruptChunk(t *testing.T) { for _, enc := range testEncoding { + enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() @@ -251,6 +252,8 @@ func TestRoundtripV2(t *testing.T) { for _, f := range HeadBlockFmts { for _, enc := range testEncoding { for _, version := range []byte{chunkFormatV2, chunkFormatV3} { + enc := enc + version := version t.Run(enc.String(), func(t *testing.T) { t.Parallel() @@ -308,6 +311,7 @@ func TestRoundtripV2(t *testing.T) { func TestRoundtripV3(t *testing.T) { for _, f := range HeadBlockFmts { for _, enc := range testEncoding { + enc := enc t.Run(fmt.Sprintf("%v-%v", f, enc), func(t *testing.T) { t.Parallel() @@ -331,6 +335,7 @@ func TestRoundtripV3(t *testing.T) { func TestSerialization(t *testing.T) { for _, f := range HeadBlockFmts { for _, enc := range testEncoding { + enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() @@ -382,6 +387,7 @@ func TestSerialization(t *testing.T) { func TestChunkFilling(t *testing.T) { for _, f := range HeadBlockFmts { for _, enc := range testEncoding { + enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() @@ -893,34 +899,26 @@ func TestMemChunk_IteratorBounds(t *testing.T) { expect []bool // array of expected values for next call in sequence }{ {time.Unix(0, 0), time.Unix(0, 1), logproto.FORWARD, []bool{false}}, - {time.Unix(0, 1), time.Unix(0, 1), logproto.FORWARD, []bool{true, false}}, {time.Unix(0, 1), time.Unix(0, 2), logproto.FORWARD, []bool{true, false}}, - {time.Unix(0, 2), time.Unix(0, 2), logproto.FORWARD, []bool{true, false}}, {time.Unix(0, 1), time.Unix(0, 3), logproto.FORWARD, []bool{true, true, false}}, {time.Unix(0, 2), time.Unix(0, 3), logproto.FORWARD, []bool{true, false}}, - {time.Unix(0, 3), time.Unix(0, 3), logproto.FORWARD, []bool{false}}, {time.Unix(0, 0), time.Unix(0, 1), logproto.BACKWARD, []bool{false}}, - {time.Unix(0, 1), time.Unix(0, 1), logproto.BACKWARD, []bool{true, false}}, {time.Unix(0, 1), time.Unix(0, 2), logproto.BACKWARD, []bool{true, false}}, - {time.Unix(0, 2), time.Unix(0, 2), logproto.BACKWARD, []bool{true, false}}, {time.Unix(0, 1), time.Unix(0, 3), logproto.BACKWARD, []bool{true, true, false}}, {time.Unix(0, 2), time.Unix(0, 3), logproto.BACKWARD, []bool{true, false}}, - {time.Unix(0, 3), time.Unix(0, 3), logproto.BACKWARD, []bool{false}}, } { t.Run( fmt.Sprintf("mint:%d,maxt:%d,direction:%s", tt.mint.UnixNano(), tt.maxt.UnixNano(), tt.direction), func(t *testing.T) { - t.Parallel() - tt := tt c := createChunk() // testing headchunk it, err := c.Iterator(context.Background(), tt.mint, tt.maxt, tt.direction, noopStreamPipeline) require.NoError(t, err) - for i := range tt.expect { - require.Equal(t, tt.expect[i], it.Next()) + for idx, expected := range tt.expect { + require.Equal(t, expected, it.Next(), "idx: %s", idx) } require.NoError(t, it.Close()) @@ -938,6 
+936,7 @@ func TestMemChunk_IteratorBounds(t *testing.T) { func TestMemchunkLongLine(t *testing.T) { for _, enc := range testEncoding { + enc := enc t.Run(enc.String(), func(t *testing.T) { t.Parallel() diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 52381a528cad..2bb33c824f26 100644 --- a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -305,6 +305,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { rl.Alert.Value, expr, time.Duration(rl.For), + time.Duration(rl.KeepFiringFor), labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), nil, diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go index f5a34f600e51..0b304f28288d 100644 --- a/pkg/configs/userconfig/config_test.go +++ b/pkg/configs/userconfig/config_test.go @@ -85,6 +85,7 @@ func TestParseLegacyAlerts(t *testing.T) { "TestAlert", parsed, 5*time.Minute, + 0, labels.Labels{ labels.Label{Name: "severity", Value: "critical"}, }, diff --git a/pkg/iter/entry_iterator.go b/pkg/iter/entry_iterator.go index ee26548e6658..5b57539f232b 100644 --- a/pkg/iter/entry_iterator.go +++ b/pkg/iter/entry_iterator.go @@ -4,13 +4,14 @@ import ( "container/heap" "context" "io" - "sort" + "math" "sync" "time" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logqlmodel/stats" "github.com/grafana/loki/pkg/util" + "github.com/grafana/loki/pkg/util/loser" ) // EntryIterator iterates over entries in time-order. @@ -327,11 +328,9 @@ func (i *mergeEntryIterator) Len() int { } type entrySortIterator struct { - is []EntryIterator - prefetched bool - byAscendingTime bool - currEntry entryWithLabels - errs []error + tree *loser.Tree[sortFields, EntryIterator] + currEntry entryWithLabels + errs []error } // NewSortEntryIterator returns a new EntryIterator that sorts entries by timestamp (depending on the direction) the input iterators. @@ -345,120 +344,79 @@ func NewSortEntryIterator(is []EntryIterator, direction logproto.Direction) Entr if len(is) == 1 { return is[0] } - result := &entrySortIterator{is: is} + maxVal, less := treeLess(direction) + result := &entrySortIterator{} + result.tree = loser.New(is, maxVal, sortFieldsAt, less, result.closeEntry) + return result +} + +func treeLess(direction logproto.Direction) (maxVal sortFields, less func(a, b sortFields) bool) { switch direction { case logproto.BACKWARD: - result.byAscendingTime = false + maxVal = sortFields{timeNanos: math.MinInt64} + less = lessDescending case logproto.FORWARD: - result.byAscendingTime = true + maxVal = sortFields{timeNanos: math.MaxInt64} + less = lessAscending default: panic("bad direction") } - return result + return } -func (i *entrySortIterator) lessByIndex(k, j int) bool { - t1, t2 := i.is[k].Entry().Timestamp.UnixNano(), i.is[j].Entry().Timestamp.UnixNano() - if t1 == t2 { - // The underlying stream hash may not be available, such as when merging LokiResponses in the - // frontend which were sharded. Prefer to use the underlying stream hash when available, - // which is needed in deduping code, but defer to label sorting when it's not present. 
- if i.is[k].StreamHash() == 0 { - return i.is[k].Labels() < i.is[j].Labels() - } - return i.is[k].StreamHash() < i.is[j].StreamHash() - } - if i.byAscendingTime { - return t1 < t2 - } - return t1 > t2 +type sortFields struct { + labels string + timeNanos int64 + streamHash uint64 } -func (i *entrySortIterator) lessByValue(t1 int64, l1 uint64, lb string, index int) bool { - t2 := i.is[index].Entry().Timestamp.UnixNano() - if t1 == t2 { - if l1 == 0 { - return lb < i.is[index].Labels() - } - return l1 < i.is[index].StreamHash() +func sortFieldsAt(i EntryIterator) sortFields { + return sortFields{ + timeNanos: i.Entry().Timestamp.UnixNano(), + labels: i.Labels(), + streamHash: i.StreamHash(), } - if i.byAscendingTime { - return t1 < t2 - } - return t1 > t2 } -// init throws out empty iterators and sorts them. -func (i *entrySortIterator) init() { - if i.prefetched { - return - } - - i.prefetched = true - tmp := make([]EntryIterator, 0, len(i.is)) - for _, it := range i.is { - if it.Next() { - tmp = append(tmp, it) - continue +func lessAscending(e1, e2 sortFields) bool { + if e1.timeNanos == e2.timeNanos { + // The underlying stream hash may not be available, such as when merging LokiResponses in the + // frontend which were sharded. Prefer to use the underlying stream hash when available, + // which is needed in deduping code, but defer to label sorting when it's not present. + if e1.streamHash == 0 { + return e1.labels < e2.labels } + return e1.streamHash < e2.streamHash + } + return e1.timeNanos < e2.timeNanos +} - if err := it.Error(); err != nil { - i.errs = append(i.errs, err) +func lessDescending(e1, e2 sortFields) bool { + if e1.timeNanos == e2.timeNanos { + if e1.streamHash == 0 { + return e1.labels < e2.labels } - util.LogError("closing iterator", it.Close) + return e1.streamHash < e2.streamHash } - i.is = tmp - sort.Slice(i.is, i.lessByIndex) + return e1.timeNanos > e2.timeNanos } -func (i *entrySortIterator) fix() { - head := i.is[0] - t1 := head.Entry().Timestamp.UnixNano() - l1 := head.StreamHash() - lb := head.Labels() - - // shortcut - if len(i.is) <= 1 || i.lessByValue(t1, l1, lb, 1) { - return +func (i *entrySortIterator) closeEntry(e EntryIterator) { + if err := e.Error(); err != nil { + i.errs = append(i.errs, err) } + util.LogError("closing iterator", e.Close) - // First element is out of place. So we reposition it. - i.is = i.is[1:] // drop head - index := sort.Search(len(i.is), func(in int) bool { return i.lessByValue(t1, l1, lb, in) }) - - if index == len(i.is) { - i.is = append(i.is, head) - } else { - i.is = append(i.is[:index+1], i.is[index:]...) - i.is[index] = head - } } - func (i *entrySortIterator) Next() bool { - i.init() - - if len(i.is) == 0 { + ret := i.tree.Next() + if !ret { return false } - - next := i.is[0] + next := i.tree.Winner() i.currEntry.Entry = next.Entry() i.currEntry.labels = next.Labels() i.currEntry.streamHash = next.StreamHash() - // if the top iterator is empty, we remove it. 
- if !next.Next() { - i.is = i.is[1:] - if err := next.Error(); err != nil { - i.errs = append(i.errs, err) - } - util.LogError("closing iterator", next.Close) - return true - } - - if len(i.is) > 1 { - i.fix() - } - return true } @@ -486,12 +444,8 @@ func (i *entrySortIterator) Error() error { } func (i *entrySortIterator) Close() error { - for _, entryIterator := range i.is { - if err := entryIterator.Close(); err != nil { - return err - } - } - return nil + i.tree.Close() + return i.Error() } // NewStreamsIterator returns an iterator over logproto.Stream diff --git a/pkg/logcli/output/default.go b/pkg/logcli/output/default.go index d69fab470439..d6edf3c30d34 100644 --- a/pkg/logcli/output/default.go +++ b/pkg/logcli/output/default.go @@ -32,7 +32,14 @@ func (o *DefaultOutput) FormatAndPrintln(ts time.Time, lbls loghttp.LabelSet, ma } else { fmt.Fprintf(o.w, "%s %s %s\n", color.BlueString(timestamp), color.RedString(padLabel(lbls, maxLabelsLen)), line) } +} +// WithWriter returns a copy of the LogOutput with the writer set to the given writer +func (o DefaultOutput) WithWriter(w io.Writer) LogOutput { + return &DefaultOutput{ + w: w, + options: o.options, + } } // add some padding after labels diff --git a/pkg/logcli/output/jsonl.go b/pkg/logcli/output/jsonl.go index b8cbe8615d7f..09f2616bfa4d 100644 --- a/pkg/logcli/output/jsonl.go +++ b/pkg/logcli/output/jsonl.go @@ -35,3 +35,11 @@ func (o *JSONLOutput) FormatAndPrintln(ts time.Time, lbls loghttp.LabelSet, maxL fmt.Fprintln(o.w, string(out)) } + +// WithWriter returns a copy of the LogOutput with the writer set to the given writer +func (o JSONLOutput) WithWriter(w io.Writer) LogOutput { + return &JSONLOutput{ + w: w, + options: o.options, + } +} diff --git a/pkg/logcli/output/output.go b/pkg/logcli/output/output.go index e1129c022529..92d3a214c230 100644 --- a/pkg/logcli/output/output.go +++ b/pkg/logcli/output/output.go @@ -29,6 +29,7 @@ var colorList = []*color.Color{ // LogOutput is the interface any output mode must implement type LogOutput interface { FormatAndPrintln(ts time.Time, lbls loghttp.LabelSet, maxLabelsLen int, line string) + WithWriter(w io.Writer) LogOutput } // LogOutputOptions defines options supported by LogOutput diff --git a/pkg/logcli/output/raw.go b/pkg/logcli/output/raw.go index 6aaa87c29056..e5397f64bc3f 100644 --- a/pkg/logcli/output/raw.go +++ b/pkg/logcli/output/raw.go @@ -28,3 +28,11 @@ func (o *RawOutput) FormatAndPrintln(ts time.Time, lbls loghttp.LabelSet, maxLab } fmt.Fprintln(o.w, line) } + +// WithWriter returns a copy of the LogOutput with the writer set to the given writer +func (o RawOutput) WithWriter(w io.Writer) LogOutput { + return &RawOutput{ + w: w, + options: o.options, + } +} diff --git a/pkg/logcli/query/part_file.go b/pkg/logcli/query/part_file.go new file mode 100644 index 000000000000..8cb450c1e3f4 --- /dev/null +++ b/pkg/logcli/query/part_file.go @@ -0,0 +1,108 @@ +package query + +import ( + "errors" + "fmt" + "os" + "sync" +) + +// PartFile partially complete file. +// Expected usage: +// 1. Create the temp file: CreateTemp +// 2. Write the data to the file +// 3. When you're done, call Finalize and the temp file will be closed and renamed +type PartFile struct { + finalName string + fd *os.File + lock sync.Mutex +} + +// NewPartFile initializes a new partial file object, setting the filename +// which will be used when the file is closed with the Finalize function. +// +// This will not create the file, call CreateTemp to create the file. 
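// A minimal usage sketch of PartFile (illustrative only; the file name is arbitrary
// and the sketch uses only the functions defined in this file):
//
//	pf := NewPartFile("query.part")
//	if err := pf.CreateTempFile(); err != nil {
//		// handle err
//	}
//	defer pf.Close() // double close is handled gracefully
//	if _, err := pf.Write([]byte("log line\n")); err != nil {
//		// handle err
//	}
//	if err := pf.Finalize(); err != nil {
//		// handle err
//	}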
+func NewPartFile(filename string) *PartFile {
+	return &PartFile{
+		finalName: filename,
+	}
+}
+
+// Exists checks if the completed file exists.
+func (f *PartFile) Exists() (bool, error) {
+	if _, err := os.Stat(f.finalName); err == nil {
+		// No error means the file exists, and we can stat it.
+		return true, nil
+	} else if errors.Is(err, os.ErrNotExist) {
+		// File does not exist.
+		return false, nil
+	} else {
+		// Unclear if file exists or not, we cannot stat it.
+		return false, fmt.Errorf("failed to check if part file exists: %s: %s", f.finalName, err)
+	}
+}
+
+// CreateTempFile creates the temp file to store the data before Finalize is called.
+func (f *PartFile) CreateTempFile() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	tmpName := f.finalName + ".tmp"
+
+	fd, err := os.Create(tmpName)
+	if err != nil {
+		return fmt.Errorf("failed to create part file: %s: %s", tmpName, err)
+	}
+
+	f.fd = fd
+	return nil
+}
+
+// Write writes to the temporary file.
+func (f *PartFile) Write(b []byte) (int, error) {
+	return f.fd.Write(b)
+}
+
+// Close closes the temporary file.
+// Double close is handled gracefully without error so that Close can be deferred for errors,
+// and is also called when Finalize is called.
+func (f *PartFile) Close() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	// Prevent double close
+	if f.fd == nil {
+		return nil
+	}
+
+	filename := f.fd.Name()
+
+	if err := f.fd.Sync(); err != nil {
+		return fmt.Errorf("failed to fsync part file: %s: %s", filename, err)
+	}
+
+	if err := f.fd.Close(); err != nil {
+		return fmt.Errorf("failed to close part file: %s: %s", filename, err)
+	}
+
+	f.fd = nil
+	return nil
+}
+
+// Finalize closes the temporary file, and renames it to the final file name.
+func (f *PartFile) Finalize() error {
+	tmpFileName := f.fd.Name()
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("failed to close part file: %s: %s", tmpFileName, err)
+	}
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if err := os.Rename(tmpFileName, f.finalName); err != nil {
+		return fmt.Errorf("failed to rename part file: %s: %s", tmpFileName, err)
+	}
+
+	return nil
+}
diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go
index d9b7bd90f5f4..e934c656bd65 100644
--- a/pkg/logcli/query/query.go
+++ b/pkg/logcli/query/query.go
@@ -5,10 +5,12 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"io"
 	"log"
 	"os"
 	"sort"
 	"strings"
+	"sync"
 	"text/tabwriter"
 	"time"
@@ -62,6 +64,31 @@ type Query struct {
 	ColoredOutput          bool
 	LocalConfig            string
 	FetchSchemaFromStorage bool
+
+	// Parallelization parameters.
+
+	// The duration of each part/job.
+	ParallelDuration time.Duration
+
+	// Number of workers to start.
+	ParallelMaxWorkers int
+
+	// Path prefix of the name for each part file.
+	// The idea for this is to allow the user to download many different queries at the same
+	// time, and/or give a directory for the part files to be placed.
+	PartPathPrefix string
+
+	// By default (false value), if the part file has finished downloading, and another job with
+	// the same filename is run, the completed file will be skipped. If set to true, each worker
+	// will re-download its part and overwrite the completed file.
+	OverwriteCompleted bool
+
+	// If true, the part files will be read in order, and the data will be output to stdout.
+	MergeParts bool
+
+	// If MergeParts is false, this parameter has no effect; part files will be kept.
+	// Otherwise, if this is true, the part files will not be deleted once they have been merged.
+	KeepParts bool
 }
 
 // DoQuery executes the query and prints out the results
@@ -82,6 +109,24 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
 	var resp *loghttp.QueryResponse
 	var err error
+	var partFile *PartFile
+	if q.PartPathPrefix != "" {
+		var shouldSkip bool
+		partFile, shouldSkip = q.createPartFile()
+
+		// createPartFile will return true if the part file exists and
+		// OverwriteCompleted is false, therefore we should exit the function
+		// here because we have nothing to do.
+		if shouldSkip {
+			return
+		}
+	}
+
+	if partFile != nil {
+		defer partFile.Close()
+		out = out.WithWriter(partFile)
+	}
+
 	if q.isInstant() {
 		resp, err = c.Query(q.QueryString, q.Limit, q.Start, d, q.Quiet)
 		if err != nil {
@@ -92,7 +137,9 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
 		}
 		_, _ = q.printResult(resp.Data.Result, out, nil)
 	} else {
-		if q.Limit < q.BatchSize {
+		unlimited := q.Limit == 0
+
+		if q.Limit < q.BatchSize && !unlimited {
 			q.BatchSize = q.Limit
 		}
 		resultLength := 0
@@ -100,11 +147,12 @@
 		start := q.Start
 		end := q.End
 		var lastEntry []*loghttp.Entry
-		for total < q.Limit {
+		for total < q.Limit || unlimited {
 			bs := q.BatchSize
 			// We want to truncate the batch size if the remaining number
 			// of items needed to reach the limit is less than the batch size
-			if q.Limit-total < q.BatchSize {
+			// unless the query has no limit, i.e. limit==0.
+			if q.Limit-total < q.BatchSize && !unlimited {
 				// Truncated batchsize is q.Limit - total, however we add to this
 				// the length of the overlap from the last query to make sure we get the
 				// correct amount of new logs knowing there will be some overlapping logs returned.
@@ -157,9 +205,201 @@ func (q *Query) DoQuery(c client.Client, out output.LogOutput, statistics bool)
 			// fudge the timestamp forward in time to make sure to get the last entry from this batch in the next query
 			end = lastEntry[0].Timestamp.Add(1 * time.Nanosecond)
 		}
+		}
+	}
+
+	if partFile != nil {
+		if err := partFile.Finalize(); err != nil {
+			log.Fatalln(err)
+		}
+	}
+}
+
+func (q *Query) outputFilename() string {
+	return fmt.Sprintf(
+		"%s_%s_%s.part",
+		q.PartPathPrefix,
+		q.Start.UTC().Format("20060102T150405"),
+		q.End.UTC().Format("20060102T150405"),
+	)
+}
+
+// createPartFile returns a PartFile.
+// The bool value indicates whether the part file already exists and this range should be skipped.
+func (q *Query) createPartFile() (*PartFile, bool) {
+	partFile := NewPartFile(q.outputFilename())
+
+	if !q.OverwriteCompleted {
+		// If we already have the completed file, no need to download it again.
+		// The user can delete the files if they want to download parts again.
+		exists, err := partFile.Exists()
+		if err != nil {
+			log.Fatalf("Query failed: %s\n", err)
+		}
+		if exists {
+			log.Printf("Skip range: %s - %s: already downloaded\n", q.Start, q.End)
+			return nil, true
+		}
+	}
+
+	if err := partFile.CreateTempFile(); err != nil {
+		log.Fatalf("Query failed: %s\n", err)
+	}
+
+	return partFile, false
+}
+
+// ceilingDivision divides d by m, rounding up to the nearest whole multiple.
+func ceilingDivision(d, m time.Duration) int64 {
+	return int64((d + m - 1) / m)
+}
+
+// nextJob returns the next job's start and end times.
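// For illustration, matching the cases in TestParallelJobs: with Start=15:00,
// End=16:01 and ParallelDuration=30m, successive forward windows are
// [15:00,15:30], [15:30,16:00], [16:00,16:01]; backward windows are anchored
// at End and come newest first: [15:31,16:01], [15:01,15:31], [15:00,15:01].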
+func (q *Query) nextJob(start, end time.Time) (time.Time, time.Time) { + if q.Forward { + start = end + return start, minTime(start.Add(q.ParallelDuration), q.End) + } + + end = start + return maxTime(end.Add(-q.ParallelDuration), q.Start), end +} + +type parallelJob struct { + q *Query + done chan struct{} +} + +func newParallelJob(q *Query) *parallelJob { + return ¶llelJob{ + q: q, + done: make(chan struct{}, 1), + } +} +func (j *parallelJob) run(c client.Client, out output.LogOutput, statistics bool) { + j.q.DoQuery(c, out, statistics) + j.done <- struct{}{} +} + +func (q *Query) parallelJobs() []*parallelJob { + nJobs := ceilingDivision(q.End.Sub(q.Start), q.ParallelDuration) + jobs := make([]*parallelJob, nJobs) + + // Normally `nextJob` will swap the start/end to get the next job. Here, we swap them + // on input so that we calculate the starting job instead of the next job. + start, end := q.nextJob(q.End, q.Start) + + // Queue up jobs + for i := range jobs { + rq := *q + rq.Start = start + rq.End = end + + jobs[i] = newParallelJob(&rq) + + start, end = q.nextJob(start, end) + } + + return jobs +} + +// Waits for each job to finish in order, reads the part file and copies it to stdout +func (q *Query) mergeJobs(jobs []*parallelJob) error { + if !q.MergeParts { + return nil + } + + for _, job := range jobs { + // wait for the next job to finish + <-job.done + + f, err := os.Open(job.q.outputFilename()) + if err != nil { + return fmt.Errorf("open file error: %w", err) + } + defer f.Close() + + _, err = io.Copy(os.Stdout, f) + if err != nil { + return fmt.Errorf("copying file error: %w", err) } + + if !q.KeepParts { + err := os.Remove(job.q.outputFilename()) + if err != nil { + return fmt.Errorf("removing file error: %w", err) + } + } + } + + return nil +} + +// Starts `ParallelMaxWorkers` number of workers to process all of the `parallelJob`s +// This function is non-blocking. The caller should `Wait` on the returned `WaitGroup`. +func (q *Query) startWorkers( + jobs []*parallelJob, + c client.Client, + out output.LogOutput, + statistics bool, +) *sync.WaitGroup { + wg := sync.WaitGroup{} + jobsChan := make(chan *parallelJob, len(jobs)) + + // Queue up the jobs + // There is a possible optimization here to use an unbuffered channel, + // But the memory and CPU overhead for yet another go routine makes me + // think that this optimization is not worth it. So I used a buffered + // channel instead. 
+ for _, job := range jobs { + jobsChan <- job + } + close(jobsChan) + + // Start workers + for w := 0; w < q.ParallelMaxWorkers; w++ { + wg.Add(1) + + go func() { + defer wg.Done() + for job := range jobsChan { + job.run(c, out, statistics) + } + }() + } + + return &wg +} + +func (q *Query) DoQueryParallel(c client.Client, out output.LogOutput, statistics bool) { + if q.ParallelDuration < 1 { + log.Fatalf("Parallel duration has to be a positive value\n") + } + + jobs := q.parallelJobs() + + wg := q.startWorkers(jobs, c, out, statistics) + + if err := q.mergeJobs(jobs); err != nil { + log.Fatalf("Merging part files error: %s\n", err) + } + + wg.Wait() +} + +func minTime(t1, t2 time.Time) time.Time { + if t1.Before(t2) { + return t1 + } + return t2 +} + +func maxTime(t1, t2 time.Time) time.Time { + if t1.After(t2) { + return t1 } + return t2 } func (q *Query) printResult(value loghttp.ResultValue, out output.LogOutput, lastEntry []*loghttp.Entry) (int, []*loghttp.Entry) { diff --git a/pkg/logcli/query/query_test.go b/pkg/logcli/query/query_test.go index 3de2537a25e4..29583ad8d81c 100644 --- a/pkg/logcli/query/query_test.go +++ b/pkg/logcli/query/query_test.go @@ -3,6 +3,7 @@ package query import ( "bytes" "context" + "fmt" "os" "path/filepath" "reflect" @@ -460,6 +461,46 @@ func Test_batch(t *testing.T) { "line10", "line9", "line8", "line7", "line6b", "line6a", "line6", "line5", "line4", "line3", "line2", }, }, + { + name: "single stream backward batch identical timestamps without limit", + streams: []logproto.Stream{ + { + Labels: "{test=\"simple\"}", + Entries: []logproto.Entry{ + {Timestamp: time.Unix(1, 0), Line: "line1"}, + {Timestamp: time.Unix(2, 0), Line: "line2"}, + {Timestamp: time.Unix(3, 0), Line: "line3"}, + {Timestamp: time.Unix(4, 0), Line: "line4"}, + {Timestamp: time.Unix(5, 0), Line: "line5"}, + {Timestamp: time.Unix(6, 0), Line: "line6"}, + {Timestamp: time.Unix(6, 0), Line: "line6a"}, + {Timestamp: time.Unix(6, 0), Line: "line6b"}, + {Timestamp: time.Unix(7, 0), Line: "line7"}, + {Timestamp: time.Unix(8, 0), Line: "line8"}, + {Timestamp: time.Unix(9, 0), Line: "line9"}, + {Timestamp: time.Unix(10, 0), Line: "line10"}, + }, + }, + }, + start: time.Unix(1, 0), + end: time.Unix(11, 0), + limit: 0, + batch: 4, + labelMatcher: "{test=\"simple\"}", + forward: false, + // Our batchsize is 2 but each query will also return the overlapping last element from the + // previous batch, as such we only get one item per call so we make a lot of calls + // Call one: line10 line9 line8 line7 + // Call two: line7 line6b line6a line6 + // Call three: line6b line6a line6 line5 + // Call four: line5 line5 line3 line2 + // Call five: line1 + // Call six: - + expectedCalls: 6, + expected: []string{ + "line10", "line9", "line8", "line7", "line6b", "line6a", "line6", "line5", "line4", "line3", "line2", "line1", + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -640,3 +681,229 @@ func TestLoadFromURL(t *testing.T) { require.NoError(t, err) require.NotNil(t, schemaConfig) } + +func TestDurationCeilDiv(t *testing.T) { + tests := []struct { + name string + d time.Duration + m time.Duration + expect int64 + }{ + { + "10m / 5m = 2", + 10 * time.Minute, + 5 * time.Minute, + 2, + }, + { + "11m / 5m = 3", + 11 * time.Minute, + 5 * time.Minute, + 3, + }, + { + "1h / 15m = 4", + 1 * time.Hour, + 15 * time.Minute, + 4, + }, + { + "1h / 14m = 5", + 1 * time.Hour, + 14 * time.Minute, + 5, + }, + } + + for _, tt := range tests { + t.Run( + tt.name, + func(t *testing.T) { + 
require.Equal(t, tt.expect, ceilingDivision(tt.d, tt.m)) + }, + ) + } +} + +func mustParseTime(value string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05", value) + if err != nil { + panic(fmt.Errorf("invalid timestamp: %w", err)) + } + + return t +} + +func cmpParallelJobSlice(t *testing.T, expected, actual []*parallelJob) { + require.Equal(t, len(expected), len(actual), "job slice lengths don't match") + + for i, jobE := range expected { + jobA := actual[i] + + require.Equal(t, jobE.q.Start, jobA.q.Start, "i=%d: job start not equal", i) + require.Equal(t, jobE.q.End, jobA.q.End, "i=%d: job end not equal", i) + require.Equal(t, jobE.q.Forward, jobA.q.Forward, "i=%d: job direction not equal", i) + } +} + +func TestParallelJobs(t *testing.T) { + mkQuery := func(start, end string, d time.Duration, forward bool) *Query { + return &Query{ + Start: mustParseTime(start), + End: mustParseTime(end), + ParallelDuration: d, + Forward: forward, + } + } + + mkParallelJob := func(start, end string, forward bool) *parallelJob { + return ¶llelJob{ + q: mkQuery(start, end, time.Minute, forward), + } + } + + tests := []struct { + name string + q *Query + jobs []*parallelJob + }{ + { + "1h range, 30m period, forward", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 16:00:00", + 30*time.Minute, + true, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:30:00", + true, + ), + mkParallelJob( + "2023-02-10 15:30:00", + "2023-02-10 16:00:00", + true, + ), + }, + }, + { + "1h range, 30m period, reverse", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 16:00:00", + 30*time.Minute, + false, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:30:00", + "2023-02-10 16:00:00", + false, + ), + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:30:00", + false, + ), + }, + }, + { + "1h1m range, 30m period, forward", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 16:01:00", + 30*time.Minute, + true, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:30:00", + true, + ), + mkParallelJob( + "2023-02-10 15:30:00", + "2023-02-10 16:00:00", + true, + ), + mkParallelJob( + "2023-02-10 16:00:00", + "2023-02-10 16:01:00", + true, + ), + }, + }, + { + "1h1m range, 30m period, reverse", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 16:01:00", + 30*time.Minute, + false, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:31:00", + "2023-02-10 16:01:00", + false, + ), + mkParallelJob( + "2023-02-10 15:01:00", + "2023-02-10 15:31:00", + false, + ), + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:01:00", + false, + ), + }, + }, + { + "15m range, 30m period, forward", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 15:15:00", + 30*time.Minute, + true, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:15:00", + true, + ), + }, + }, + { + "15m range, 30m period, reverse", + mkQuery( + "2023-02-10 15:00:00", + "2023-02-10 15:15:00", + 30*time.Minute, + false, + ), + []*parallelJob{ + mkParallelJob( + "2023-02-10 15:00:00", + "2023-02-10 15:15:00", + false, + ), + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run( + tt.name, + func(t *testing.T) { + jobs := tt.q.parallelJobs() + cmpParallelJobSlice(t, tt.jobs, jobs) + }, + ) + } +} diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index aa7e0b791497..cd3964cbb990 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -49,6 +49,7 @@ func TestMappingEquivalence(t *testing.T) { 
{`sum(max(rate({a=~".+"}[1s])))`, false}, {`max(count(rate({a=~".+"}[1s])))`, false}, {`max(sum by (cluster) (rate({a=~".+"}[1s]))) / count(rate({a=~".+"}[1s]))`, false}, + {`sum(rate({a=~".+"} |= "foo" != "foo"[1s]) or vector(1))`, false}, // topk prefers already-seen values in tiebreakers. Since the test data generates // the same log lines for each series & the resulting promql.Vectors aren't deterministically // sorted by labels, we don't expect this to pass. diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index d5934807339c..4863ea79e028 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -430,7 +430,10 @@ func (q *query) evalVector(_ context.Context, expr *syntax.VectorExpr) (promql_p } if GetRangeType(q.params) == InstantType { - return s, nil + return promql.Vector{promql.Sample{ + Point: promql.Point{T: q.params.Start().UnixMilli(), V: value}, + Metric: labels.Labels{}, + }}, nil } return PopulateMatrixFromScalar(s, q.params), nil diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index bb1945909ddd..52830d83c582 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -643,7 +643,10 @@ func TestEngine_LogsInstantQuery(t *testing.T) { time.Unix(60, 0), logproto.FORWARD, 100, nil, nil, - promql.Scalar{T: 60 * 1000, V: 2}, + promql.Vector{promql.Sample{ + Point: promql.Point{T: 60 * 1000, V: 2}, + Metric: labels.Labels{}, + }}, }, { // single comparison @@ -2302,6 +2305,37 @@ func TestEngine_LogsInstantQuery_IllegalLogql(t *testing.T) { require.EqualError(t, err, expectEvalSampleErr.Error()) } +func TestEngine_LogsInstantQuery_Vector(t *testing.T) { + eng := NewEngine(EngineOpts{}, &statsQuerier{}, NoLimits, log.NewNopLogger()) + now := time.Now() + queueTime := 2 * time.Nanosecond + logqlVector := `vector(5)` + q := eng.Query(LiteralParams{ + qs: logqlVector, + start: now, + end: now, + step: 0, + interval: time.Second * 30, + direction: logproto.BACKWARD, + limit: 1000, + }) + ctx := context.WithValue(context.Background(), httpreq.QueryQueueTimeHTTPHeader, queueTime) + _, err := q.Exec(user.InjectOrgID(ctx, "fake")) + + require.NoError(t, err) + + qry, ok := q.(*query) + require.Equal(t, ok, true) + vectorExpr := syntax.NewVectorExpr("5") + + data, err := qry.evalSample(ctx, vectorExpr) + require.NoError(t, err) + result, ok := data.(promql.Vector) + require.Equal(t, ok, true) + require.Equal(t, result[0].V, float64(5)) + require.Equal(t, result[0].T, now.UnixNano()/int64(time.Millisecond)) +} + type errorIteratorQuerier struct { samples []iter.SampleIterator entries []iter.EntryIterator @@ -2358,15 +2392,15 @@ func TestStepEvaluator_Error(t *testing.T) { } for _, tc := range tests { + tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - tc := tc eng := NewEngine(EngineOpts{}, tc.querier, NoLimits, log.NewNopLogger()) q := eng.Query(LiteralParams{ qs: tc.qs, start: time.Unix(0, 0), end: time.Unix(180, 0), step: 1 * time.Second, + limit: 1, }) _, err := q.Exec(user.InjectOrgID(context.Background(), "fake")) require.Equal(t, tc.err, err) diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index d26b2fcb11d5..7c3a51e979ee 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -232,7 +232,7 @@ func (ev *DefaultEvaluator) StepEvaluator( if err != nil { return nil, err } - return newVectorIterator(val, q.Step().Nanoseconds(), q.Start().UnixNano(), q.End().UnixNano()), nil + return newVectorIterator(val, q.Step().Milliseconds(), q.Start().UnixMilli(), q.End().UnixMilli()), nil default: return nil, 
EvaluatorUnsupportedType(e, ev) } @@ -944,34 +944,34 @@ func literalStepEvaluator( // vectorIterator return simple vector like (1). type vectorIterator struct { - step, end, current int64 - val float64 + stepMs, endMs, currentMs int64 + val float64 } func newVectorIterator(val float64, - step, start, end int64) *vectorIterator { - if step == 0 { - step = 1 + stepMs, startMs, endMs int64) *vectorIterator { + if stepMs == 0 { + stepMs = 1 } return &vectorIterator{ - val: val, - step: step, - end: end, - current: start - step, + val: val, + stepMs: stepMs, + endMs: endMs, + currentMs: startMs - stepMs, } } func (r *vectorIterator) Next() (bool, int64, promql.Vector) { - r.current = r.current + r.step - if r.current > r.end { + r.currentMs = r.currentMs + r.stepMs + if r.currentMs > r.endMs { return false, 0, nil } results := make(promql.Vector, 0) vectorPoint := promql.Sample{ - Point: promql.Point{T: r.current, V: r.val}, + Point: promql.Point{T: r.currentMs, V: r.val}, } results = append(results, vectorPoint) - return true, r.current, results + return true, r.currentMs, results } func (r *vectorIterator) Close() error { diff --git a/pkg/logql/log/ip.go b/pkg/logql/log/ip.go index 56a6067adc58..ad1419f7ec41 100644 --- a/pkg/logql/log/ip.go +++ b/pkg/logql/log/ip.go @@ -6,7 +6,8 @@ import ( "unicode" "github.com/prometheus/prometheus/model/labels" - "inet.af/netaddr" + "go4.org/netipx" + "net/netip" ) var ( @@ -21,7 +22,7 @@ const ( IPv6Charset = "0123456789abcdefABCDEF:." ) -// Should be one of the netaddr.IP, netaddr.IPRange, netadd.IPPrefix. +// Should be one of the netip.Addr, netip.Prefix, netipx.IPRange. type IPMatcher interface{} type IPLineFilter struct { @@ -190,7 +191,7 @@ func (f *ipFilter) filter(line []byte) bool { if iplen < 0 { return false, 0 } - ip, err := netaddr.ParseIP(string(line[start : start+iplen])) + ip, err := netip.ParseAddr(string(line[start : start+iplen])) if err == nil { if containsIP(f.matcher, ip) { return true, 0 @@ -223,13 +224,13 @@ func (f *ipFilter) filter(line []byte) bool { return false } -func containsIP(matcher IPMatcher, ip netaddr.IP) bool { +func containsIP(matcher IPMatcher, ip netip.Addr) bool { switch m := matcher.(type) { - case netaddr.IP: + case netip.Addr: return m.Compare(ip) == 0 - case netaddr.IPRange: + case netipx.IPRange: return m.Contains(ip) - case netaddr.IPPrefix: + case netip.Prefix: return m.Contains(ip) } return false @@ -241,16 +242,16 @@ func getMatcher(pattern string) (IPMatcher, error) { err error ) - matcher, err = netaddr.ParseIP(pattern) // is it simple single IP? + matcher, err = netip.ParseAddr(pattern) // is it simple single IP? if err == nil { return matcher, nil } - matcher, err = netaddr.ParseIPPrefix(pattern) // is it cidr format? (192.168.0.1/16) + matcher, err = netip.ParsePrefix(pattern) // is it cidr format? (192.168.0.1/16) if err == nil { return matcher, nil } - matcher, err = netaddr.ParseIPRange(pattern) // is it IP range format? (192.168.0.1 - 192.168.4.5 + matcher, err = netipx.ParseIPRange(pattern) // is it IP range format? 
(192.168.0.1 - 192.168.4.5 if err == nil { return matcher, nil } diff --git a/pkg/logql/log/json_expr.go b/pkg/logql/log/json_expr.go deleted file mode 100644 index 81207e13fd05..000000000000 --- a/pkg/logql/log/json_expr.go +++ /dev/null @@ -1,13 +0,0 @@ -package log - -type JSONExpression struct { - Identifier string - Expression string -} - -func NewJSONExpr(identifier, expression string) JSONExpression { - return JSONExpression{ - Identifier: identifier, - Expression: expression, - } -} diff --git a/pkg/logql/log/label_extraction_expr.go b/pkg/logql/log/label_extraction_expr.go new file mode 100644 index 000000000000..79a0ab7162d0 --- /dev/null +++ b/pkg/logql/log/label_extraction_expr.go @@ -0,0 +1,13 @@ +package log + +type LabelExtractionExpr struct { + Identifier string + Expression string +} + +func NewLabelExtractionExpr(identifier, expression string) LabelExtractionExpr { + return LabelExtractionExpr{ + Identifier: identifier, + Expression: expression, + } +} diff --git a/pkg/logql/log/logfmt/lexer.go b/pkg/logql/log/logfmt/lexer.go new file mode 100644 index 000000000000..06756c0bfea6 --- /dev/null +++ b/pkg/logql/log/logfmt/lexer.go @@ -0,0 +1,125 @@ +package logfmt + +import ( + "bufio" + "fmt" + "io" + "text/scanner" +) + +type Scanner struct { + buf *bufio.Reader + data []interface{} + err error + debug bool +} + +func NewScanner(r io.Reader, debug bool) *Scanner { + return &Scanner{ + buf: bufio.NewReader(r), + debug: debug, + } +} + +func (sc *Scanner) Error(s string) { + sc.err = fmt.Errorf(s) + fmt.Printf("syntax error: %s\n", s) +} + +func (sc *Scanner) Reduced(rule, state int, lval *LogfmtExprSymType) bool { + if sc.debug { + fmt.Printf("rule: %v; state %v; lval: %v\n", rule, state, lval) + } + return false +} + +func (sc *Scanner) Lex(lval *LogfmtExprSymType) int { + return sc.lex(lval) +} + +func (sc *Scanner) lex(lval *LogfmtExprSymType) int { + for { + r := sc.read() + + if r == 0 { + return 0 + } + if isWhitespace(r) { + continue + } + + switch true { + case isStartIdentifier(r): + sc.unread() + lval.key = sc.scanField() + return KEY + case r == '"': + sc.unread() + lval.str = sc.scanStr() + return STRING + default: + sc.err = fmt.Errorf("unexpected char %c", r) + return 0 + } + } +} + +func isStartIdentifier(r rune) bool { + return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' +} + +func isIdentifier(r rune) bool { + return isStartIdentifier(r) || (r >= '0' && r <= '9') +} + +func (sc *Scanner) scanField() string { + var str []rune + + for { + r := sc.read() + if !isIdentifier(r) || isEndOfInput(r) { + sc.unread() + break + } + + str = append(str, r) + } + return string(str) +} + +// input is either terminated by EOF or null byte +func isEndOfInput(r rune) bool { + return r == scanner.EOF || r == rune(0) +} + +func (sc *Scanner) read() rune { + ch, _, _ := sc.buf.ReadRune() + return ch +} + +func (sc *Scanner) scanStr() string { + var str []rune + //begin with ", end with " + r := sc.read() + if r != '"' { + sc.err = fmt.Errorf("unexpected char %c", r) + return "" + } + + for { + r := sc.read() + if isEndOfInput(r) { + break + } + + if r == '"' || r == ']' { + break + } + str = append(str, r) + } + return string(str) +} + +func (sc *Scanner) unread() { _ = sc.buf.UnreadRune() } + +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } diff --git a/pkg/logql/log/logfmt/logfmtexpr.y b/pkg/logql/log/logfmt/logfmtexpr.y new file mode 100644 index 000000000000..12660f3113a9 --- /dev/null +++ b/pkg/logql/log/logfmt/logfmtexpr.y 
@@ -0,0 +1,39 @@ + + +%{ +package logfmt + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +%} + +%union { + str string + key string + list []interface{} +} + +%token STRING +%token KEY + +%type key value +%type expressions + +%% + +logfmt: + expressions { setScannerData(LogfmtExprlex, $1) } + +expressions: + key { $$ = []interface{}{$1} } + | value { $$ = []interface{}{$1} } + | expressions value { $$ = append($1, $2) } + ; + +key: + KEY { $$ = $1 } + +value: + STRING { $$ = $1 } \ No newline at end of file diff --git a/pkg/logql/log/logfmt/logfmtexpr.y.go b/pkg/logql/log/logfmt/logfmtexpr.y.go new file mode 100644 index 000000000000..77eb1d2362f2 --- /dev/null +++ b/pkg/logql/log/logfmt/logfmtexpr.y.go @@ -0,0 +1,465 @@ +// Code generated by goyacc -p LogfmtExpr -o logfmtexpr.y.go logfmtexpr.y. DO NOT EDIT. + +//line logfmtexpr.y:4 +package logfmt + +import __yyfmt__ "fmt" + +//line logfmtexpr.y:4 + +func setScannerData(lex interface{}, data []interface{}) { + lex.(*Scanner).data = data +} + +//line logfmtexpr.y:12 +type LogfmtExprSymType struct { + yys int + str string + key string + list []interface{} +} + +const STRING = 57346 +const KEY = 57347 + +var LogfmtExprToknames = [...]string{ + "$end", + "error", + "$unk", + "STRING", + "KEY", +} + +var LogfmtExprStatenames = [...]string{} + +const LogfmtExprEofCode = 1 +const LogfmtExprErrCode = 2 +const LogfmtExprInitialStackSize = 16 + +//line yacctab:1 +var LogfmtExprExca = [...]int8{ + -1, 1, + 1, -1, + -2, 0, +} + +const LogfmtExprPrivate = 57344 + +const LogfmtExprLast = 8 + +var LogfmtExprAct = [...]int8{ + 6, 5, 6, 4, 1, 2, 7, 3, +} + +var LogfmtExprPact = [...]int16{ + -4, -1000, -2, -1000, -1000, -1000, -1000, -1000, +} + +var LogfmtExprPgo = [...]int8{ + 0, 7, 3, 5, 4, +} + +var LogfmtExprR1 = [...]int8{ + 0, 4, 3, 3, 3, 1, 2, +} + +var LogfmtExprR2 = [...]int8{ + 0, 1, 1, 1, 2, 1, 1, +} + +var LogfmtExprChk = [...]int16{ + -1000, -4, -3, -1, -2, 5, 4, -2, +} + +var LogfmtExprDef = [...]int8{ + 0, -2, 1, 2, 3, 5, 6, 4, +} + +var LogfmtExprTok1 = [...]int8{ + 1, +} + +var LogfmtExprTok2 = [...]int8{ + 2, 3, 4, 5, +} + +var LogfmtExprTok3 = [...]int8{ + 0, +} + +var LogfmtExprErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + LogfmtExprDebug = 0 + LogfmtExprErrorVerbose = false +) + +type LogfmtExprLexer interface { + Lex(lval *LogfmtExprSymType) int + Error(s string) +} + +type LogfmtExprParser interface { + Parse(LogfmtExprLexer) int + Lookahead() int +} + +type LogfmtExprParserImpl struct { + lval LogfmtExprSymType + stack [LogfmtExprInitialStackSize]LogfmtExprSymType + char int +} + +func (p *LogfmtExprParserImpl) Lookahead() int { + return p.char +} + +func LogfmtExprNewParser() LogfmtExprParser { + return &LogfmtExprParserImpl{} +} + +const LogfmtExprFlag = -1000 + +func LogfmtExprTokname(c int) string { + if c >= 1 && c-1 < len(LogfmtExprToknames) { + if LogfmtExprToknames[c-1] != "" { + return LogfmtExprToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func LogfmtExprStatname(s int) string { + if s >= 0 && s < len(LogfmtExprStatenames) { + if LogfmtExprStatenames[s] != "" { + return LogfmtExprStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func LogfmtExprErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !LogfmtExprErrorVerbose { + return "syntax error" + } + + for _, e := range LogfmtExprErrorMessages { + if e.state == state && e.token 
== lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + LogfmtExprTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := int(LogfmtExprPact[state]) + for tok := TOKSTART; tok-1 < len(LogfmtExprToknames); tok++ { + if n := base + tok; n >= 0 && n < LogfmtExprLast && int(LogfmtExprChk[int(LogfmtExprAct[n])]) == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if LogfmtExprDef[state] == -2 { + i := 0 + for LogfmtExprExca[i] != -1 || int(LogfmtExprExca[i+1]) != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; LogfmtExprExca[i] >= 0; i += 2 { + tok := int(LogfmtExprExca[i]) + if tok < TOKSTART || LogfmtExprExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if LogfmtExprExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += LogfmtExprTokname(tok) + } + return res +} + +func LogfmtExprlex1(lex LogfmtExprLexer, lval *LogfmtExprSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = int(LogfmtExprTok1[0]) + goto out + } + if char < len(LogfmtExprTok1) { + token = int(LogfmtExprTok1[char]) + goto out + } + if char >= LogfmtExprPrivate { + if char < LogfmtExprPrivate+len(LogfmtExprTok2) { + token = int(LogfmtExprTok2[char-LogfmtExprPrivate]) + goto out + } + } + for i := 0; i < len(LogfmtExprTok3); i += 2 { + token = int(LogfmtExprTok3[i+0]) + if token == char { + token = int(LogfmtExprTok3[i+1]) + goto out + } + } + +out: + if token == 0 { + token = int(LogfmtExprTok2[1]) /* unknown char */ + } + if LogfmtExprDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", LogfmtExprTokname(token), uint(char)) + } + return char, token +} + +func LogfmtExprParse(LogfmtExprlex LogfmtExprLexer) int { + return LogfmtExprNewParser().Parse(LogfmtExprlex) +} + +func (LogfmtExprrcvr *LogfmtExprParserImpl) Parse(LogfmtExprlex LogfmtExprLexer) int { + var LogfmtExprn int + var LogfmtExprVAL LogfmtExprSymType + var LogfmtExprDollar []LogfmtExprSymType + _ = LogfmtExprDollar // silence set and not used + LogfmtExprS := LogfmtExprrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + LogfmtExprstate := 0 + LogfmtExprrcvr.char = -1 + LogfmtExprtoken := -1 // LogfmtExprrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ LogfmtExprstate = -1 + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + }() + LogfmtExprp := -1 + goto LogfmtExprstack + +ret0: + return 0 + +ret1: + return 1 + +LogfmtExprstack: + /* put a state and value onto the stack */ + if LogfmtExprDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", LogfmtExprTokname(LogfmtExprtoken), LogfmtExprStatname(LogfmtExprstate)) + } + + LogfmtExprp++ + if LogfmtExprp >= len(LogfmtExprS) { + nyys := make([]LogfmtExprSymType, len(LogfmtExprS)*2) + copy(nyys, LogfmtExprS) + LogfmtExprS = nyys + } + LogfmtExprS[LogfmtExprp] = LogfmtExprVAL + LogfmtExprS[LogfmtExprp].yys = LogfmtExprstate + +LogfmtExprnewstate: + LogfmtExprn = int(LogfmtExprPact[LogfmtExprstate]) + if LogfmtExprn <= LogfmtExprFlag { + goto LogfmtExprdefault /* simple state */ + } + if LogfmtExprrcvr.char < 0 { + LogfmtExprrcvr.char, LogfmtExprtoken = LogfmtExprlex1(LogfmtExprlex, &LogfmtExprrcvr.lval) + } + LogfmtExprn += LogfmtExprtoken + if LogfmtExprn < 0 || LogfmtExprn >= LogfmtExprLast { + goto LogfmtExprdefault + } + LogfmtExprn = int(LogfmtExprAct[LogfmtExprn]) + if int(LogfmtExprChk[LogfmtExprn]) == LogfmtExprtoken { /* valid shift */ + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + LogfmtExprVAL = LogfmtExprrcvr.lval + LogfmtExprstate = LogfmtExprn + if Errflag > 0 { + Errflag-- + } + goto LogfmtExprstack + } + +LogfmtExprdefault: + /* default state action */ + LogfmtExprn = int(LogfmtExprDef[LogfmtExprstate]) + if LogfmtExprn == -2 { + if LogfmtExprrcvr.char < 0 { + LogfmtExprrcvr.char, LogfmtExprtoken = LogfmtExprlex1(LogfmtExprlex, &LogfmtExprrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if LogfmtExprExca[xi+0] == -1 && int(LogfmtExprExca[xi+1]) == LogfmtExprstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + LogfmtExprn = int(LogfmtExprExca[xi+0]) + if LogfmtExprn < 0 || LogfmtExprn == LogfmtExprtoken { + break + } + } + LogfmtExprn = int(LogfmtExprExca[xi+1]) + if LogfmtExprn < 0 { + goto ret0 + } + } + if LogfmtExprn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + LogfmtExprlex.Error(LogfmtExprErrorMessage(LogfmtExprstate, LogfmtExprtoken)) + Nerrs++ + if LogfmtExprDebug >= 1 { + __yyfmt__.Printf("%s", LogfmtExprStatname(LogfmtExprstate)) + __yyfmt__.Printf(" saw %s\n", LogfmtExprTokname(LogfmtExprtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for LogfmtExprp >= 0 { + LogfmtExprn = int(LogfmtExprPact[LogfmtExprS[LogfmtExprp].yys]) + LogfmtExprErrCode + if LogfmtExprn >= 0 && LogfmtExprn < LogfmtExprLast { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprn]) /* simulate a shift of "error" */ + if int(LogfmtExprChk[LogfmtExprstate]) == LogfmtExprErrCode { + goto LogfmtExprstack + } + } + + /* the current p has no shift on "error", pop stack */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", LogfmtExprS[LogfmtExprp].yys) + } + LogfmtExprp-- + } + /* there is no state on the stack with an error shift ... 
abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", LogfmtExprTokname(LogfmtExprtoken)) + } + if LogfmtExprtoken == LogfmtExprEofCode { + goto ret1 + } + LogfmtExprrcvr.char = -1 + LogfmtExprtoken = -1 + goto LogfmtExprnewstate /* try again in the same state */ + } + } + + /* reduction by production LogfmtExprn */ + if LogfmtExprDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", LogfmtExprn, LogfmtExprStatname(LogfmtExprstate)) + } + + LogfmtExprnt := LogfmtExprn + LogfmtExprpt := LogfmtExprp + _ = LogfmtExprpt // guard against "declared and not used" + + LogfmtExprp -= int(LogfmtExprR2[LogfmtExprn]) + // LogfmtExprp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. + if LogfmtExprp+1 >= len(LogfmtExprS) { + nyys := make([]LogfmtExprSymType, len(LogfmtExprS)*2) + copy(nyys, LogfmtExprS) + LogfmtExprS = nyys + } + LogfmtExprVAL = LogfmtExprS[LogfmtExprp+1] + + /* consult goto table to find next state */ + LogfmtExprn = int(LogfmtExprR1[LogfmtExprn]) + LogfmtExprg := int(LogfmtExprPgo[LogfmtExprn]) + LogfmtExprj := LogfmtExprg + LogfmtExprS[LogfmtExprp].yys + 1 + + if LogfmtExprj >= LogfmtExprLast { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprg]) + } else { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprj]) + if int(LogfmtExprChk[LogfmtExprstate]) != -LogfmtExprn { + LogfmtExprstate = int(LogfmtExprAct[LogfmtExprg]) + } + } + // dummy call; replaced with literal code + switch LogfmtExprnt { + + case 1: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:27 + { + setScannerData(LogfmtExprlex, LogfmtExprDollar[1].list) + } + case 2: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:30 + { + LogfmtExprVAL.list = []interface{}{LogfmtExprDollar[1].str} + } + case 3: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:31 + { + LogfmtExprVAL.list = []interface{}{LogfmtExprDollar[1].str} + } + case 4: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-2 : LogfmtExprpt+1] +//line logfmtexpr.y:32 + { + LogfmtExprVAL.list = append(LogfmtExprDollar[1].list, LogfmtExprDollar[2].str) + } + case 5: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:36 + { + LogfmtExprVAL.str = LogfmtExprDollar[1].key + } + case 6: + LogfmtExprDollar = LogfmtExprS[LogfmtExprpt-1 : LogfmtExprpt+1] +//line logfmtexpr.y:39 + { + LogfmtExprVAL.str = LogfmtExprDollar[1].str + } + } + goto LogfmtExprstack /* stack new state and value */ +} diff --git a/pkg/logql/log/logfmt/logfmtexpr_test.go b/pkg/logql/log/logfmt/logfmtexpr_test.go new file mode 100644 index 000000000000..a45ad9c26343 --- /dev/null +++ b/pkg/logql/log/logfmt/logfmtexpr_test.go @@ -0,0 +1,65 @@ +package logfmt + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLogfmtExpressionParser(t *testing.T) { + // `app=foo level=error spaces="value with ÜFT8👌" ts=2021-02-12T19:18:10.037940878Z` + + tests := []struct { + name string + expression string + want []interface{} + error error + }{ + { + "single field", + "app", + []interface{}{"app"}, + nil, + }, + { + "empty", + ``, + nil, + nil, + }, + { + "field with UTF8", + "fieldwithÜFT8👌", + nil, + fmt.Errorf("unexpected char Ü"), + }, + { + "invalid field with spaces", + `field with spaces`, + nil, + fmt.Errorf("syntax error: unexpected KEY"), + }, + { + "identifier with 
number", + `id8`, + []interface{}{"id8"}, + nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parsed, err := Parse(tt.expression, false) + + require.Equal(t, tt.want, parsed) + + if tt.error == nil { + return + } + + require.NotNil(t, err) + require.Equal(t, tt.error.Error(), err.Error()) + }) + } + +} diff --git a/pkg/logql/log/logfmt/parser.go b/pkg/logql/log/logfmt/parser.go new file mode 100644 index 000000000000..5a72ce1de0f7 --- /dev/null +++ b/pkg/logql/log/logfmt/parser.go @@ -0,0 +1,19 @@ +package logfmt + +import ( + "strings" +) + +func init() { + LogfmtExprErrorVerbose = true +} + +func Parse(expr string, debug bool) ([]interface{}, error) { + s := NewScanner(strings.NewReader(expr), debug) + LogfmtExprParse(s) + + if s.err != nil { + return nil, s.err + } + return s.data, nil +} diff --git a/pkg/logql/log/parser.go b/pkg/logql/log/parser.go index caa841f5503f..811e7f639b8f 100644 --- a/pkg/logql/log/parser.go +++ b/pkg/logql/log/parser.go @@ -341,13 +341,113 @@ func (l *PatternParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byt func (l *PatternParser) RequiredLabelNames() []string { return []string{} } +type LogfmtExpressionParser struct { + expressions map[string][]interface{} + dec *logfmt.Decoder + keys internedStringSet +} + +func NewLogfmtExpressionParser(expressions []LabelExtractionExpr) (*LogfmtExpressionParser, error) { + if len(expressions) == 0 { + return nil, fmt.Errorf("no logfmt expression provided") + } + paths := make(map[string][]interface{}, len(expressions)) + + for _, exp := range expressions { + path, err := logfmt.Parse(exp.Expression, false) + if err != nil { + return nil, fmt.Errorf("cannot parse expression [%s]: %w", exp.Expression, err) + } + + if !model.LabelName(exp.Identifier).IsValid() { + return nil, fmt.Errorf("invalid extracted label name '%s'", exp.Identifier) + } + paths[exp.Identifier] = path + } + return &LogfmtExpressionParser{ + expressions: paths, + dec: logfmt.NewDecoder(nil), + keys: internedStringSet{}, + }, nil +} + +func (l *LogfmtExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) ([]byte, bool) { + // If there are no expressions, extract common labels + // and add the suffix "_extracted" + if len(l.expressions) == 0 { + return line, false + } + + if lbs.ParserLabelHints().NoLabels() { + return line, true + } + + // Create a map of every renamed label and its original name + // in order to retrieve it later in the extraction phase + keys := make(map[string]string, len(l.expressions)) + for id, paths := range l.expressions { + keys[id] = fmt.Sprintf("%v", paths...) 
+ if !lbs.BaseHas(id) { + lbs.Set(id, "") + } + } + + l.dec.Reset(line) + var current []byte + for l.dec.ScanKeyval() { + current = l.dec.Key() + key, ok := l.keys.Get(current, func() (string, bool) { + sanitized := sanitizeLabelKey(string(current), true) + if len(sanitized) == 0 { + return "", false + } + + if !lbs.ParserLabelHints().ShouldExtract(sanitized) { + return "", false + } + return sanitized, true + }) + if !ok { + continue + } + val := l.dec.Value() + + for id, orig := range keys { + if key == orig { + key = id + break + } + } + + if bytes.ContainsRune(val, utf8.RuneError) { + val = nil + } + + if _, ok := l.expressions[key]; ok { + if lbs.BaseHas(key) { + key = key + duplicateSuffix + } + lbs.Set(key, string(val)) + } + } + if l.dec.Err() != nil { + lbs.SetErr(errLogfmt) + lbs.SetErrorDetails(l.dec.Err().Error()) + return line, true + } + + return line, true +} + +func (l *LogfmtExpressionParser) RequiredLabelNames() []string { return []string{} } + type JSONExpressionParser struct { expressions map[string][]interface{} keys internedStringSet } -func NewJSONExpressionParser(expressions []JSONExpression) (*JSONExpressionParser, error) { +func NewJSONExpressionParser(expressions []LabelExtractionExpr) (*JSONExpressionParser, error) { paths := make(map[string][]interface{}) for _, exp := range expressions { @@ -390,7 +490,6 @@ func (j *JSONExpressionParser) Process(_ int64, line []byte, lbs *LabelsBuilder) lbs.Set(key, result) } - return line, true } diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go index 3d18e02c4246..bf6487677f39 100644 --- a/pkg/logql/log/parser_test.go +++ b/pkg/logql/log/parser_test.go @@ -117,15 +117,15 @@ func TestJSONExpressionParser(t *testing.T) { tests := []struct { name string line []byte - expressions []JSONExpression + expressions []LabelExtractionExpr lbs labels.Labels want labels.Labels }{ { "single field", testLine, - []JSONExpression{ - NewJSONExpr("app", "app"), + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), }, labels.Labels{}, labels.Labels{ @@ -135,8 +135,8 @@ func TestJSONExpressionParser(t *testing.T) { { "alternate syntax", testLine, - []JSONExpression{ - NewJSONExpr("test", `["field with space"]`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("test", `["field with space"]`), }, labels.Labels{}, labels.Labels{ @@ -146,9 +146,9 @@ func TestJSONExpressionParser(t *testing.T) { { "multiple fields", testLine, - []JSONExpression{ - NewJSONExpr("app", "app"), - NewJSONExpr("namespace", "namespace"), + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + NewLabelExtractionExpr("namespace", "namespace"), }, labels.Labels{}, labels.Labels{ @@ -159,8 +159,8 @@ func TestJSONExpressionParser(t *testing.T) { { "utf8", testLine, - []JSONExpression{ - NewJSONExpr("utf8", `["field with ÜFT8👌"]`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("utf8", `["field with ÜFT8👌"]`), }, labels.Labels{}, labels.Labels{ @@ -170,8 +170,8 @@ func TestJSONExpressionParser(t *testing.T) { { "nested field", testLine, - []JSONExpression{ - NewJSONExpr("uuid", "pod.uuid"), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", "pod.uuid"), }, labels.Labels{}, labels.Labels{ @@ -181,8 +181,8 @@ func TestJSONExpressionParser(t *testing.T) { { "nested field alternate syntax", testLine, - []JSONExpression{ - NewJSONExpr("uuid", `pod["uuid"]`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `pod["uuid"]`), }, labels.Labels{}, labels.Labels{ @@ -192,8 +192,8 @@ func TestJSONExpressionParser(t *testing.T) 
{ { "nested field alternate syntax 2", testLine, - []JSONExpression{ - NewJSONExpr("uuid", `["pod"]["uuid"]`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `["pod"]["uuid"]`), }, labels.Labels{}, labels.Labels{ @@ -203,8 +203,8 @@ func TestJSONExpressionParser(t *testing.T) { { "nested field alternate syntax 3", testLine, - []JSONExpression{ - NewJSONExpr("uuid", `["pod"].uuid`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `["pod"].uuid`), }, labels.Labels{}, labels.Labels{ @@ -214,8 +214,8 @@ func TestJSONExpressionParser(t *testing.T) { { "array element", testLine, - []JSONExpression{ - NewJSONExpr("param", `pod.deployment.params[0]`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("param", `pod.deployment.params[0]`), }, labels.Labels{}, labels.Labels{ @@ -225,8 +225,8 @@ func TestJSONExpressionParser(t *testing.T) { { "full array", testLine, - []JSONExpression{ - NewJSONExpr("params", `pod.deployment.params`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("params", `pod.deployment.params`), }, labels.Labels{}, labels.Labels{ @@ -236,8 +236,8 @@ func TestJSONExpressionParser(t *testing.T) { { "full object", testLine, - []JSONExpression{ - NewJSONExpr("deployment", `pod.deployment`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("deployment", `pod.deployment`), }, labels.Labels{}, labels.Labels{ @@ -247,8 +247,8 @@ func TestJSONExpressionParser(t *testing.T) { { "expression matching nothing", testLine, - []JSONExpression{ - NewJSONExpr("nope", `pod.nope`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("nope", `pod.nope`), }, labels.Labels{}, labels.Labels{ @@ -258,8 +258,8 @@ func TestJSONExpressionParser(t *testing.T) { { "null field", testLine, - []JSONExpression{ - NewJSONExpr("nf", `null_field`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("nf", `null_field`), }, labels.Labels{}, labels.Labels{ @@ -269,8 +269,8 @@ func TestJSONExpressionParser(t *testing.T) { { "boolean field", testLine, - []JSONExpression{ - NewJSONExpr("bool", `bool_field`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("bool", `bool_field`), }, labels.Labels{}, labels.Labels{ @@ -280,8 +280,8 @@ func TestJSONExpressionParser(t *testing.T) { { "label override", testLine, - []JSONExpression{ - NewJSONExpr("uuid", `pod.uuid`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `pod.uuid`), }, labels.Labels{ {Name: "uuid", Value: "bar"}, @@ -294,8 +294,8 @@ func TestJSONExpressionParser(t *testing.T) { { "non-matching expression", testLine, - []JSONExpression{ - NewJSONExpr("request_size", `request.size.invalid`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("request_size", `request.size.invalid`), }, labels.Labels{ {Name: "uuid", Value: "bar"}, @@ -308,8 +308,8 @@ func TestJSONExpressionParser(t *testing.T) { { "empty line", []byte("{}"), - []JSONExpression{ - NewJSONExpr("uuid", `pod.uuid`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `pod.uuid`), }, labels.Labels{}, labels.Labels{ @@ -319,8 +319,8 @@ func TestJSONExpressionParser(t *testing.T) { { "existing labels are not affected", testLine, - []JSONExpression{ - NewJSONExpr("uuid", `will.not.work`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `will.not.work`), }, labels.Labels{ {Name: "foo", Value: "bar"}, @@ -333,8 +333,8 @@ func TestJSONExpressionParser(t *testing.T) { { "invalid JSON line", []byte(`invalid json`), - []JSONExpression{ - NewJSONExpr("uuid", `will.not.work`), + []LabelExtractionExpr{ + NewLabelExtractionExpr("uuid", `will.not.work`), }, labels.Labels{ 
{Name: "foo", Value: "bar"}, @@ -350,7 +350,6 @@ func TestJSONExpressionParser(t *testing.T) { if err != nil { t.Fatalf("cannot create JSON expression parser: %s", err.Error()) } - t.Run(tt.name, func(t *testing.T) { b := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash()) b.Reset() @@ -364,38 +363,38 @@ func TestJSONExpressionParser(t *testing.T) { func TestJSONExpressionParserFailures(t *testing.T) { tests := []struct { name string - expression JSONExpression + expression LabelExtractionExpr error string }{ { "invalid field name", - NewJSONExpr("app", `field with space`), + NewLabelExtractionExpr("app", `field with space`), "unexpected FIELD", }, { "missing opening square bracket", - NewJSONExpr("app", `"pod"]`), + NewLabelExtractionExpr("app", `"pod"]`), "unexpected STRING, expecting LSB or FIELD", }, { "missing closing square bracket", - NewJSONExpr("app", `["pod"`), + NewLabelExtractionExpr("app", `["pod"`), "unexpected $end, expecting RSB", }, { "missing closing square bracket", - NewJSONExpr("app", `["pod""uuid"]`), + NewLabelExtractionExpr("app", `["pod""uuid"]`), "unexpected STRING, expecting RSB", }, { "invalid nesting", - NewJSONExpr("app", `pod..uuid`), + NewLabelExtractionExpr("app", `pod..uuid`), "unexpected DOT, expecting FIELD", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - _, err := NewJSONExpressionParser([]JSONExpression{tt.expression}) + _, err := NewJSONExpressionParser([]LabelExtractionExpr{tt.expression}) require.NotNil(t, err) require.Equal(t, err.Error(), fmt.Sprintf("cannot parse expression [%s]: syntax error: %s", tt.expression.Expression, tt.error)) @@ -722,6 +721,171 @@ func Test_logfmtParser_Parse(t *testing.T) { } } +func TestLogfmtExpressionParser(t *testing.T) { + testLine := []byte(`app=foo level=error spaces="value with ÜFT8👌" ts=2021-02-12T19:18:10.037940878Z`) + + tests := []struct { + name string + line []byte + expressions []LabelExtractionExpr + lbs labels.Labels + want labels.Labels + }{ + { + "single field", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "app", Value: "foo"}, + }, + }, + { + "multiple fields", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + NewLabelExtractionExpr("level", "level"), + NewLabelExtractionExpr("ts", "ts"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "app", Value: "foo"}, + {Name: "level", Value: "error"}, + {Name: "ts", Value: "2021-02-12T19:18:10.037940878Z"}, + }, + }, + { + "label renaming", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("test", "level"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "test", Value: "error"}, + }, + }, + { + "multiple fields with label renaming", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + NewLabelExtractionExpr("lvl", "level"), + NewLabelExtractionExpr("timestamp", "ts"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "app", Value: "foo"}, + {Name: "lvl", Value: "error"}, + {Name: "timestamp", Value: "2021-02-12T19:18:10.037940878Z"}, + }, + }, + { + "value with spaces and ÜFT8👌", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("spaces", "spaces"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "spaces", Value: "value with ÜFT8👌"}, + }, + }, + { + "expression matching nothing", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("nope", "nope"), + }, + labels.Labels{}, + labels.Labels{ + {Name: "nope", Value: ""}, + }, + }, + { + "double property logfmt", + 
testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + }, + labels.Labels{ + {Name: "ap", Value: "bar"}, + }, + labels.Labels{ + {Name: "ap", Value: "bar"}, + {Name: "app", Value: "foo"}, + }, + }, + { + "label override", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("app", "app"), + }, + labels.Labels{ + {Name: "app", Value: "bar"}, + }, + labels.Labels{ + {Name: "app", Value: "bar"}, + {Name: "app_extracted", Value: "foo"}, + }, + }, + { + "label override 2", + testLine, + []LabelExtractionExpr{ + NewLabelExtractionExpr("lvl", "level"), + }, + labels.Labels{ + {Name: "level", Value: "debug"}, + }, + labels.Labels{ + {Name: "level", Value: "debug"}, + {Name: "lvl", Value: "error"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l, err := NewLogfmtExpressionParser(tt.expressions) + if err != nil { + t.Fatalf("cannot create logfmt expression parser: %s", err.Error()) + } + b := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash()) + b.Reset() + _, _ = l.Process(0, tt.line, b) + sort.Sort(tt.want) + require.Equal(t, tt.want, b.LabelsResult().Labels()) + }) + } +} + +func TestXExpressionParserFailures(t *testing.T) { + tests := []struct { + name string + expression LabelExtractionExpr + error string + }{ + { + "invalid field name", + NewLabelExtractionExpr("app", `field with space`), + "unexpected KEY", + }, + } + for _, tt := range tests { + + t.Run(tt.name, func(t *testing.T) { + _, err := NewLogfmtExpressionParser([]LabelExtractionExpr{tt.expression}) + + require.NotNil(t, err) + require.Equal(t, err.Error(), fmt.Sprintf("cannot parse expression [%s]: syntax error: %s", tt.expression.Expression, tt.error)) + }) + } +} + func Test_unpackParser_Parse(t *testing.T) { tests := []struct { name string diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go index e0149321aa32..0a014e84af9e 100644 --- a/pkg/logql/log/pipeline_test.go +++ b/pkg/logql/log/pipeline_test.go @@ -388,8 +388,8 @@ func BenchmarkJSONParserInvalidLine(b *testing.B) { } func BenchmarkJSONExpressionParser(b *testing.B) { - parser, err := NewJSONExpressionParser([]JSONExpression{ - NewJSONExpr("context_file", "context.file"), + parser, err := NewJSONExpressionParser([]LabelExtractionExpr{ + NewLabelExtractionExpr("context_file", "context.file"), }) if err != nil { b.Fatal("cannot create new JSON expression parser") @@ -399,8 +399,8 @@ func BenchmarkJSONExpressionParser(b *testing.B) { } func BenchmarkJSONExpressionParserInvalidLine(b *testing.B) { - parser, err := NewJSONExpressionParser([]JSONExpression{ - NewJSONExpr("context_file", "some.expression"), + parser, err := NewJSONExpressionParser([]LabelExtractionExpr{ + NewLabelExtractionExpr("context_file", "some.expression"), }) if err != nil { b.Fatal("cannot create new JSON expression parser") @@ -408,3 +408,47 @@ func BenchmarkJSONExpressionParserInvalidLine(b *testing.B) { invalidJSONBenchmark(b, parser) } + +func logfmtBenchmark(b *testing.B, parser Stage) { + b.ReportAllocs() + + p := NewPipeline([]Stage{ + mustFilter(NewFilter("ts", labels.MatchEqual)).ToStage(), + parser, + }) + + line := []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 org_id=29 traceID=29a0f088b047eb8c latency=fast query="{stream=\"stdout\",pod=\"loki-canary-xmjzp\"}" query_type=limited range_type=range length=20s step=1s duration=58.126671ms status=200 throughput_mb=2.496547 total_bytes_mb=0.145116`) + lbs := labels.Labels{ + {Name: "cluster", Value: "ops-tool1"}, + {Name: 
"name", Value: "querier"}, + {Name: "ts", Value: "2020-10-18T18:04:22.147378997Z"}, + } + b.ResetTimer() + sp := p.ForStream(lbs) + for n := 0; n < b.N; n++ { + resLine, resLbs, resMatches = sp.Process(0, line) + + if !resMatches { + b.Fatalf("resulting line not ok: %s\n", line) + } + + if resLbs.Labels().Get("ts") != "2020-10-18T18:04:22.147378997Z" { + b.Fatalf("label was not extracted correctly! %+v\n", resLbs) + } + } +} + +func BenchmarkLogfmtParser(b *testing.B) { + logfmtBenchmark(b, NewLogfmtParser()) +} + +func BenchmarkLogfmtExpressionParser(b *testing.B) { + parser, err := NewLogfmtExpressionParser([]LabelExtractionExpr{ + NewLabelExtractionExpr("timestamp", "ts"), + }) + if err != nil { + b.Fatal("cannot create new logfmt expression parser:", err.Error()) + } + + logfmtBenchmark(b, parser) +} diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go index eefba61e7012..0a3b3f86f37f 100644 --- a/pkg/logql/optimize.go +++ b/pkg/logql/optimize.go @@ -65,6 +65,10 @@ func removeLineformat(expr syntax.SampleExpr) { found = true break } + if _, ok := pipelineExpr.MultiStages[j].(*syntax.LogfmtExpressionParser); ok { + found = true + break + } } if found { // we cannot remove safely the linefmtExpr. diff --git a/pkg/logql/optimize_test.go b/pkg/logql/optimize_test.go index b4de878976ad..b4005e6d1f15 100644 --- a/pkg/logql/optimize_test.go +++ b/pkg/logql/optimize_test.go @@ -15,18 +15,18 @@ func Test_optimizeSampleExpr(t *testing.T) { // noop {`1`, `1`}, {`1 + 1`, `2`}, - {`topk(10,sum by(name)(rate({region="us-east1"}[5m])))`, `topk(10,sum by(name)(rate({region="us-east1"}[5m])))`}, - {`sum by(name)(rate({region="us-east1"}[5m]))`, `sum by(name)(rate({region="us-east1"}[5m]))`}, - {`sum by(name)(bytes_over_time({region="us-east1"} | line_format "something else"[5m]))`, `sum by(name)(bytes_over_time({region="us-east1"} | line_format "something else"[5m]))`}, - {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" |= "something"[5m]))`, `sum by(name)(rate({region="us-east1"} | json | line_format "something else" |= "something"[5m]))`}, - {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" | logfmt[5m]))`, `sum by(name)(rate({region="us-east1"} | json | line_format "something else" | logfmt[5m]))`}, - {`sum by(name)(count_over_time({region="us-east1"} | line_format "{{ .message }}" | json foo="bar"[5m]))`, `sum by(name)(count_over_time({region="us-east1"} | line_format "{{ .message }}" | json foo="bar"[5m]))`}, + {`topk(10,sum by(name)(rate({region="us-east1"}[5m])))`, `topk(10,sum by (name)(rate({region="us-east1"}[5m])))`}, + {`sum by(name)(rate({region="us-east1"}[5m]))`, `sum by (name)(rate({region="us-east1"}[5m]))`}, + {`sum by(name)(bytes_over_time({region="us-east1"} | line_format "something else"[5m]))`, `sum by (name)(bytes_over_time({region="us-east1"} | line_format "something else"[5m]))`}, + {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" |= "something"[5m]))`, `sum by (name)(rate({region="us-east1"} | json | line_format "something else" |= "something"[5m]))`}, + {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" | logfmt[5m]))`, `sum by (name)(rate({region="us-east1"} | json | line_format "something else" | logfmt[5m]))`}, + {`sum by(name)(count_over_time({region="us-east1"} | line_format "{{ .message }}" | json foo="bar"[5m]))`, `sum by (name)(count_over_time({region="us-east1"} | line_format "{{ .message }}" | json foo="bar"[5m]))`}, // remove line_format that is 
not required. - {`sum by(name)(rate({region="us-east1"} | line_format "something else"[5m]))`, `sum by(name)(rate({region="us-east1"}[5m]))`}, - {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" | unwrap foo[5m]))`, `sum by(name)(rate({region="us-east1"} | json | unwrap foo[5m]))`}, + {`sum by(name)(rate({region="us-east1"} | line_format "something else"[5m]))`, `sum by (name)(rate({region="us-east1"}[5m]))`}, + {`sum by(name)(rate({region="us-east1"} | json | line_format "something else" | unwrap foo[5m]))`, `sum by (name)(rate({region="us-east1"} | json | unwrap foo[5m]))`}, {`quantile_over_time(1,{region="us-east1"} | json | line_format "something else" | unwrap foo[5m])`, `quantile_over_time(1,{region="us-east1"} | json | unwrap foo[5m])`}, - {`sum by(name)(count_over_time({region="us-east1"} | json | line_format "something else" | label_format foo=bar | line_format "boo"[5m]))`, `sum by(name)(count_over_time({region="us-east1"} | json | label_format foo=bar[5m]))`}, + {`sum by(name)(count_over_time({region="us-east1"} | json | line_format "something else" | label_format foo=bar | line_format "boo"[5m]))`, `sum by (name)(count_over_time({region="us-east1"} | json | label_format foo=bar[5m]))`}, } for _, tt := range tests { t.Run(tt.in, func(t *testing.T) { diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index 8ae45bea087f..5fa393acecbb 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -120,7 +120,7 @@ func TestMappingStrings(t *testing.T) { }{ { in: `{foo="bar"}`, - out: `downstream<{foo="bar"}, shard=0_of_2> + out: `downstream<{foo="bar"}, shard=0_of_2> ++ downstream<{foo="bar"}, shard=1_of_2>`, }, { @@ -131,7 +131,7 @@ func TestMappingStrings(t *testing.T) { { in: `sum(rate({foo="bar"}[1m]))`, out: `sum( - downstream + downstream ++ downstream )`, }, @@ -139,7 +139,7 @@ func TestMappingStrings(t *testing.T) { in: `max(count(rate({foo="bar"}[5m]))) / 2`, out: `(max( sum( - downstream + downstream ++ downstream) ) / 2 )`, @@ -147,14 +147,14 @@ func TestMappingStrings(t *testing.T) { { in: `topk(3, rate({foo="bar"}[5m]))`, out: `topk(3, - downstream + downstream ++ downstream )`, }, { in: `sum(max(rate({foo="bar"}[5m])))`, out: `sum(max( - downstream + downstream ++ downstream ))`, }, @@ -169,33 +169,33 @@ func TestMappingStrings(t *testing.T) { { in: `count(rate({foo="bar"} | json [5m]))`, out: `count( - downstream + downstream ++ downstream )`, }, { in: `avg(rate({foo="bar"} | json [5m]))`, out: `avg( - downstream + downstream ++ downstream )`, }, { in: `{foo="bar"} |= "id=123"`, - out: `downstream<{foo="bar"}|="id=123", shard=0_of_2> + out: `downstream<{foo="bar"}|="id=123", shard=0_of_2> ++ downstream<{foo="bar"}|="id=123", shard=1_of_2>`, }, { in: `sum by (cluster) (rate({foo="bar"} |= "id=123" [5m]))`, out: `sum by (cluster) ( - downstream + downstream ++ downstream )`, }, { in: `sum by (cluster) (sum_over_time({foo="bar"} |= "id=123" | logfmt | unwrap latency [5m]))`, out: `sum by (cluster) ( - downstream + downstream ++ downstream )`, }, @@ -231,7 +231,7 @@ func TestMappingStrings(t *testing.T) { in: `sum by (cluster) (rate({foo="bar"} [5m])) + ignoring(machine) sum by (cluster,machine) (rate({foo="bar"} [5m]))`, out: `( sum by (cluster) ( - downstream + downstream ++ downstream ) + ignoring(machine) sum by (cluster, machine) ( @@ -268,6 +268,32 @@ func TestMappingStrings(t *testing.T) { in: `vector(0)`, out: `vector(0.000000)`, }, + { + // or exprs aren't shardable + in: 
`count_over_time({a=~".+"}[1s]) or count_over_time({a=~".+"}[1s])`, + out: `(downstream++downstreamordownstream++downstream)`, + }, + { + // vector() exprs aren't shardable + in: `sum(count_over_time({a=~".+"}[1s]) + vector(1))`, + out: `sum((downstream++downstream+vector(1.000000)))`, + }, + { + // on() is never shardable as it can mutate labels + in: `sum(count_over_time({a=~".+"}[1s]) * on () count_over_time({a=~".+"}[1s]))`, + out: `sum((downstream++downstream*on()downstream++downstream))`, + }, + { + // ignoring() is never shardable as it can mutate labels + in: `sum(count_over_time({a=~".+"}[1s]) * ignoring (foo) count_over_time({a=~".+"}[1s]))`, + out: `sum((downstream++downstream*ignoring(foo)downstream++downstream))`, + }, + { + // ignoring () doesn't mutate labels and therefore can be shardable + // as long as the operation is shardable + in: `sum(count_over_time({a=~".+"}[1s]) * ignoring () count_over_time({a=~".+"}[1s]))`, + out: `sum(downstream++downstream)`, + }, } { t.Run(tc.in, func(t *testing.T) { ast, err := syntax.ParseExpr(tc.in) diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 151f92266ab5..75001b0d50be 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/grafana/loki/pkg/logql/log" + "github.com/grafana/loki/pkg/logql/log/logfmt" "github.com/grafana/loki/pkg/logqlmodel" ) @@ -536,12 +537,12 @@ func (e *LabelFmtExpr) String() string { } type JSONExpressionParser struct { - Expressions []log.JSONExpression + Expressions []log.LabelExtractionExpr implicit } -func newJSONExpressionParser(expressions []log.JSONExpression) *JSONExpressionParser { +func newJSONExpressionParser(expressions []log.LabelExtractionExpr) *JSONExpressionParser { return &JSONExpressionParser{ Expressions: expressions, } @@ -570,6 +571,48 @@ func (j *JSONExpressionParser) String() string { return sb.String() } +type internedStringSet map[string]struct { + s string + ok bool +} + +type LogfmtExpressionParser struct { + Expressions []log.LabelExtractionExpr + dec *logfmt.Decoder + keys internedStringSet + + implicit +} + +func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr) *LogfmtExpressionParser { + return &LogfmtExpressionParser{ + Expressions: expressions, + } +} + +func (l *LogfmtExpressionParser) Shardable() bool { return true } + +func (l *LogfmtExpressionParser) Walk(f WalkFn) { f(l) } + +func (l *LogfmtExpressionParser) Stage() (log.Stage, error) { + return log.NewLogfmtExpressionParser(l.Expressions) +} + +func (l *LogfmtExpressionParser) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpParserTypeLogfmt)) + for i, exp := range l.Expressions { + sb.WriteString(exp.Identifier) + sb.WriteString("=") + sb.WriteString(strconv.Quote(exp.Expression)) + + if i+1 != len(l.Expressions) { + sb.WriteString(",") + } + } + return sb.String() +} + func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher { m, err := labels.NewMatcher(t, n, v) if err != nil { @@ -930,9 +973,9 @@ type Grouping struct { func (g Grouping) String() string { var sb strings.Builder if g.Without { - sb.WriteString(" without") + sb.WriteString(" without ") } else if len(g.Groups) > 0 { - sb.WriteString(" by") + sb.WriteString(" by ") } if len(g.Groups) > 0 { @@ -1174,8 +1217,15 @@ func (e *BinOpExpr) String() string { // impl SampleExpr func (e *BinOpExpr) Shardable() bool { if e.Opts != nil && e.Opts.VectorMatching != nil { - // prohibit sharding 
when we're changing the label groupings, such as on or ignoring - return false + matching := e.Opts.VectorMatching + // prohibit sharding when we're changing the label groupings, + // such as when using `on` grouping or when using + // `ignoring` with a non-zero set of labels to ignore. + // `ignoring ()` is effectively the zero value + // that doesn't mutate labels and is shardable. + if matching.On || len(matching.MatchingLabels) > 0 { + return false + } } return shardableOps[e.Op] && e.SampleExpr.Shardable() && e.RHS.Shardable() } @@ -1755,7 +1805,7 @@ func (e *VectorExpr) Value() (float64, error) { func (e *VectorExpr) Selector() (LogSelectorExpr, error) { return e, e.err } func (e *VectorExpr) HasFilter() bool { return false } -func (e *VectorExpr) Shardable() bool { return true } +func (e *VectorExpr) Shardable() bool { return false } func (e *VectorExpr) Walk(f WalkFn) { f(e) } func (e *VectorExpr) Pipeline() (log.Pipeline, error) { return log.NewNoopPipeline(), nil } func (e *VectorExpr) Matchers() []*labels.Matcher { return nil } diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index b7c3f016f80e..0c6e88659638 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -172,6 +172,13 @@ func Test_SampleExpr_String(t *testing.T) { "(.*):.*" ) `, + `((( + sum by(typename,pool,commandname,colo)(sum_over_time({_namespace_="appspace", _schema_="appspace-1min", pool=~"r1testlvs", colo=~"slc|lvs|rno", env!~"(pre-production|sandbox)"} | logfmt | status!="0" | ( ( type=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" or typename=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) or status=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) | commandname=~"(?i).*|UNSET" | unwrap sumcount[5m])) / 60) + or on () + ((sum by(typename,pool,commandname,colo)(sum_over_time({_namespace_="appspace", _schema_="appspace-15min", pool=~"r1testlvs", colo=~"slc|lvs|rno", env!~"(pre-production|sandbox)"} | logfmt | status!="0" | ( ( type=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" or typename=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) or status=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) | commandname=~"(?i).*|UNSET" | unwrap sumcount[5m])) / 15) / 60)) + or on () + ((sum by(typename,pool,commandname,colo) (sum_over_time({_namespace_="appspace", _schema_="appspace-1h", pool=~"r1testlvs", colo=~"slc|lvs|rno", env!~"(pre-production|sandbox)"} | logfmt | status!="0" | ( ( type=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" or typename=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) or status=~"(?i)^(Error|Exception|Fatal|ERRPAGE|ValidationError)$" ) | commandname=~"(?i).*|UNSET" | unwrap sumcount[5m])) / 60) / 60))`, + `{app="foo"} | logfmt code="response.code", IPAddress="host"`, } { t.Run(tc, func(t *testing.T) { expr, err := ParseExpr(tc) diff --git a/pkg/logql/syntax/expr.y b/pkg/logql/syntax/expr.y index 65a6c44d509b..fafd46a73b29 100644 --- a/pkg/logql/syntax/expr.y +++ b/pkg/logql/syntax/expr.y @@ -53,9 +53,12 @@ import ( LabelFormatExpr *LabelFmtExpr LabelFormat log.LabelFmt LabelsFormat []log.LabelFmt - JSONExpressionParser *JSONExpressionParser - JSONExpression log.JSONExpression - JSONExpressionList []log.JSONExpression + + LabelExtractionExpression log.LabelExtractionExpr + LabelExtractionExpressionList []log.LabelExtractionExpr + JSONExpressionParser *JSONExpressionParser + LogfmtExpressionParser *LogfmtExpressionParser + UnwrapExpr *UnwrapExpr DecolorizeExpr 
*DecolorizeExpr OffsetExpr *OffsetExpr @@ -107,9 +110,10 @@ import ( %type labelFormatExpr %type labelFormat %type labelsFormat -%type jsonExpressionParser -%type jsonExpression -%type jsonExpressionList +%type labelExtractionExpression +%type labelExtractionExpressionList +%type logfmtExpressionParser +%type jsonExpressionParser %type unwrapExpr %type unitFilter %type ipLabelFilter @@ -256,6 +260,7 @@ pipelineStage: lineFilters { $$ = $1 } | PIPE labelParser { $$ = $2 } | PIPE jsonExpressionParser { $$ = $2 } + | PIPE logfmtExpressionParser { $$ = $2 } | PIPE labelFilter { $$ = &LabelFilterExpr{LabelFilterer: $2 }} | PIPE lineFormatExpr { $$ = $2 } | PIPE decolorizeExpr { $$ = $2 } @@ -286,7 +291,10 @@ labelParser: ; jsonExpressionParser: - JSON jsonExpressionList { $$ = newJSONExpressionParser($2) } + JSON labelExtractionExpressionList { $$ = newJSONExpressionParser($2) } + +logfmtExpressionParser: + LOGFMT labelExtractionExpressionList { $$ = newLogfmtExpressionParser($2)} lineFormatExpr: LINE_FMT STRING { $$ = newLineFmtExpr($2) }; @@ -318,13 +326,13 @@ labelFilter: | labelFilter OR labelFilter { $$ = log.NewOrLabelFilter($1, $3 ) } ; -jsonExpression: - IDENTIFIER EQ STRING { $$ = log.NewJSONExpr($1, $3) } - | IDENTIFIER { $$ = log.NewJSONExpr($1, $1) } +labelExtractionExpression: + IDENTIFIER EQ STRING { $$ = log.NewLabelExtractionExpr($1, $3) } + | IDENTIFIER { $$ = log.NewLabelExtractionExpr($1, $1) } -jsonExpressionList: - jsonExpression { $$ = []log.JSONExpression{$1} } - | jsonExpressionList COMMA jsonExpression { $$ = append($1, $3) } +labelExtractionExpressionList: + labelExtractionExpression { $$ = []log.LabelExtractionExpr{$1} } + | labelExtractionExpressionList COMMA labelExtractionExpression { $$ = append($1, $3) } ; ipLabelFilter: diff --git a/pkg/logql/syntax/expr.y.go b/pkg/logql/syntax/expr.y.go index ede5c71cec94..2a9ba7ba80c3 100644 --- a/pkg/logql/syntax/expr.y.go +++ b/pkg/logql/syntax/expr.y.go @@ -1,18 +1,16 @@ // Code generated by goyacc -p expr -o pkg/logql/syntax/expr.y.go pkg/logql/syntax/expr.y. DO NOT EDIT. 
-//line pkg/logql/syntax/expr.y:2 package syntax import __yyfmt__ "fmt" -//line pkg/logql/syntax/expr.y:2 + import ( "github.com/grafana/loki/pkg/logql/log" "github.com/prometheus/prometheus/model/labels" "time" ) -//line pkg/logql/syntax/expr.y:12 type exprSymType struct { yys int Expr Expr @@ -58,15 +56,18 @@ type exprSymType struct { LabelFormatExpr *LabelFmtExpr LabelFormat log.LabelFmt LabelsFormat []log.LabelFmt - JSONExpressionParser *JSONExpressionParser - JSONExpression log.JSONExpression - JSONExpressionList []log.JSONExpression - UnwrapExpr *UnwrapExpr - DecolorizeExpr *DecolorizeExpr - OffsetExpr *OffsetExpr - DropLabel log.DropLabel - DropLabels []log.DropLabel - DropLabelsExpr *DropLabelsExpr + + LabelExtractionExpression log.LabelExtractionExpr + LabelExtractionExpressionList []log.LabelExtractionExpr + JSONExpressionParser *JSONExpressionParser + LogfmtExpressionParser *LogfmtExpressionParser + + UnwrapExpr *UnwrapExpr + DecolorizeExpr *DecolorizeExpr + OffsetExpr *OffsetExpr + DropLabel log.DropLabel + DropLabels []log.DropLabel + DropLabelsExpr *DropLabelsExpr } const BYTES = 57346 @@ -249,17 +250,14 @@ var exprToknames = [...]string{ "MOD", "POW", } - var exprStatenames = [...]string{} const exprEofCode = 1 const exprErrCode = 2 const exprInitialStackSize = 16 -//line pkg/logql/syntax/expr.y:528 -//line yacctab:1 -var exprExca = [...]int8{ +var exprExca = [...]int{ -1, 1, 1, -1, -2, 0, @@ -267,140 +265,144 @@ var exprExca = [...]int8{ const exprPrivate = 57344 -const exprLast = 561 +const exprLast = 563 -var exprAct = [...]int16{ - 265, 210, 82, 4, 121, 64, 175, 190, 187, 219, - 73, 75, 2, 63, 180, 5, 145, 56, 78, 48, +var exprAct = [...]int{ + + 267, 212, 82, 4, 122, 64, 176, 192, 189, 221, + 73, 75, 2, 63, 181, 5, 146, 180, 78, 48, 49, 50, 57, 58, 61, 62, 59, 60, 51, 52, 53, 54, 55, 56, 49, 50, 57, 58, 61, 62, 59, 60, 51, 52, 53, 54, 55, 56, 57, 58, 61, 62, 59, 60, 51, 52, 53, 54, 55, 56, - 51, 52, 53, 54, 55, 56, 159, 160, 268, 107, - 193, 143, 144, 111, 53, 54, 55, 56, 273, 141, - 143, 144, 271, 67, 130, 149, 245, 71, 203, 246, - 244, 154, 71, 270, 69, 70, 147, 320, 177, 69, - 70, 156, 125, 157, 158, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 172, 173, 174, 133, - 212, 71, 338, 92, 81, 212, 83, 84, 69, 70, - 338, 209, 184, 312, 192, 312, 71, 199, 194, 197, - 198, 195, 196, 69, 70, 201, 274, 142, 341, 108, - 268, 243, 282, 72, 212, 217, 176, 329, 72, 269, - 71, 211, 269, 222, 213, 214, 358, 69, 70, 212, - 282, 270, 353, 270, 268, 328, 241, 319, 202, 242, - 240, 282, 135, 230, 231, 232, 327, 72, 71, 130, - 130, 83, 84, 66, 282, 69, 70, 270, 130, 326, - 270, 282, 72, 346, 177, 345, 284, 125, 125, 271, - 263, 266, 177, 272, 71, 275, 125, 107, 278, 111, - 279, 69, 70, 267, 147, 264, 72, 276, 116, 118, - 117, 206, 126, 128, 273, 343, 286, 288, 291, 293, - 89, 239, 322, 192, 303, 296, 300, 212, 294, 221, - 119, 209, 120, 304, 72, 221, 71, 130, 127, 129, - 280, 178, 176, 69, 70, 221, 335, 305, 292, 307, - 309, 177, 311, 107, 290, 125, 235, 310, 321, 306, - 72, 225, 107, 282, 289, 323, 221, 206, 283, 212, - 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, - 103, 104, 105, 106, 221, 287, 130, 332, 333, 277, - 206, 221, 107, 334, 215, 137, 136, 313, 13, 336, - 337, 302, 72, 223, 125, 342, 148, 146, 178, 176, - 220, 237, 207, 301, 16, 13, 229, 228, 348, 227, - 349, 350, 13, 148, 226, 200, 153, 152, 151, 88, - 6, 87, 354, 80, 21, 22, 23, 36, 45, 46, - 37, 39, 40, 38, 41, 42, 43, 44, 24, 25, - 315, 316, 317, 356, 352, 325, 281, 238, 26, 27, - 28, 29, 30, 31, 32, 236, 139, 
233, 33, 34, - 35, 47, 19, 224, 218, 216, 208, 79, 234, 351, - 138, 260, 13, 140, 261, 259, 155, 340, 77, 339, - 6, 318, 17, 18, 21, 22, 23, 36, 45, 46, - 37, 39, 40, 38, 41, 42, 43, 44, 24, 25, - 257, 308, 254, 258, 256, 255, 253, 86, 26, 27, - 28, 29, 30, 31, 32, 298, 299, 357, 33, 34, - 35, 47, 19, 130, 150, 251, 85, 248, 252, 250, - 249, 247, 13, 355, 344, 331, 330, 295, 285, 262, - 6, 125, 17, 18, 21, 22, 23, 36, 45, 46, - 37, 39, 40, 38, 41, 42, 43, 44, 24, 25, - 205, 204, 116, 118, 117, 203, 126, 128, 26, 27, - 28, 29, 30, 31, 32, 202, 3, 185, 33, 34, - 35, 47, 19, 74, 119, 297, 120, 183, 188, 122, - 182, 347, 127, 129, 324, 191, 181, 79, 188, 123, - 179, 110, 17, 18, 186, 114, 189, 115, 113, 112, - 65, 131, 124, 132, 109, 91, 90, 11, 10, 9, - 134, 20, 12, 15, 8, 314, 14, 7, 76, 68, - 1, + 51, 52, 53, 54, 55, 56, 56, 160, 161, 107, + 195, 144, 145, 112, 53, 54, 55, 56, 340, 142, + 144, 145, 273, 158, 159, 150, 134, 71, 270, 67, + 275, 155, 71, 272, 69, 70, 148, 322, 270, 69, + 70, 157, 340, 92, 337, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 211, + 214, 71, 314, 271, 71, 214, 83, 84, 69, 70, + 131, 69, 70, 186, 276, 194, 183, 201, 196, 199, + 200, 197, 198, 360, 71, 270, 203, 143, 126, 136, + 355, 69, 70, 72, 214, 108, 219, 214, 72, 208, + 272, 272, 213, 223, 224, 215, 216, 284, 314, 117, + 119, 118, 331, 127, 129, 275, 81, 66, 83, 84, + 131, 306, 294, 343, 232, 233, 234, 72, 348, 315, + 72, 120, 273, 121, 178, 71, 211, 71, 126, 128, + 130, 71, 69, 70, 69, 70, 272, 239, 69, 70, + 72, 304, 265, 268, 131, 274, 271, 277, 284, 107, + 280, 112, 281, 330, 347, 269, 148, 266, 178, 278, + 214, 321, 126, 237, 214, 131, 345, 223, 288, 290, + 293, 295, 317, 318, 319, 194, 223, 298, 302, 178, + 296, 179, 177, 126, 272, 131, 292, 284, 223, 284, + 208, 72, 329, 72, 328, 291, 324, 72, 305, 307, + 223, 309, 311, 126, 313, 107, 282, 289, 227, 312, + 323, 308, 279, 131, 107, 179, 177, 325, 247, 225, + 205, 248, 246, 223, 117, 119, 118, 178, 127, 129, + 284, 126, 131, 217, 208, 286, 284, 177, 303, 334, + 335, 285, 222, 138, 107, 336, 120, 137, 121, 231, + 126, 338, 339, 147, 128, 130, 209, 344, 230, 13, + 243, 13, 204, 244, 242, 229, 16, 149, 228, 149, + 350, 202, 351, 352, 13, 154, 153, 152, 88, 358, + 87, 80, 6, 245, 356, 354, 21, 22, 23, 36, + 45, 46, 37, 39, 40, 38, 41, 42, 43, 44, + 24, 25, 327, 283, 240, 238, 235, 226, 218, 210, + 26, 27, 28, 29, 30, 31, 32, 79, 140, 236, + 33, 34, 35, 47, 19, 241, 220, 262, 77, 353, + 263, 261, 139, 259, 13, 141, 260, 258, 349, 342, + 341, 320, 6, 310, 17, 18, 21, 22, 23, 36, + 45, 46, 37, 39, 40, 38, 41, 42, 43, 44, + 24, 25, 256, 156, 253, 257, 255, 254, 252, 86, + 26, 27, 28, 29, 30, 31, 32, 300, 301, 359, + 33, 34, 35, 47, 19, 250, 151, 85, 251, 249, + 3, 357, 346, 333, 13, 89, 332, 74, 299, 297, + 287, 190, 6, 264, 17, 18, 21, 22, 23, 36, + 45, 46, 37, 39, 40, 38, 41, 42, 43, 44, + 24, 25, 207, 206, 205, 204, 187, 185, 184, 326, + 26, 27, 28, 29, 30, 31, 32, 193, 182, 79, + 33, 34, 35, 47, 19, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 190, + 123, 124, 110, 111, 17, 18, 188, 115, 191, 116, + 114, 113, 65, 132, 125, 133, 109, 91, 90, 11, + 10, 9, 135, 20, 12, 15, 8, 316, 14, 7, + 76, 68, 1, } +var exprPact = [...]int{ -var exprPact = [...]int16{ - 327, -1000, -57, -1000, -1000, 146, 327, -1000, -1000, -1000, - -1000, -1000, -1000, 392, 330, 101, -1000, 449, 430, 328, - 326, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 329, -1000, -57, -1000, -1000, 130, 329, -1000, -1000, -1000, + -1000, 
-1000, -1000, 382, 328, 153, -1000, 450, 432, 327, + 325, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 146, -1000, 174, 448, -1000, 113, -1000, - -1000, -1000, -1000, 292, 291, -57, 384, -1000, -1000, 67, - 320, 447, 325, 324, 323, -1000, -1000, 327, 399, 327, - 33, -6, -1000, 327, 327, 327, 327, 327, 327, 327, - 327, 327, 327, 327, 327, 327, 327, -1000, -1000, -1000, - -1000, 185, -1000, -1000, -1000, -1000, 521, -1000, 514, -1000, - 511, -1000, -1000, -1000, -1000, 301, 501, -1000, 523, 520, - 58, -1000, -1000, -1000, 322, -1000, -1000, -1000, -1000, -1000, - 522, 499, 489, 485, 484, 308, 377, 242, 303, 290, - 376, 387, 306, 299, 374, 257, -43, 321, 316, 314, - 313, -31, -31, -13, -13, -73, -73, -73, -73, -25, - -25, -25, -25, -25, -25, 185, 301, 301, 301, 368, - -1000, 386, -1000, -1000, 252, -1000, 366, -1000, 319, 358, - -1000, 67, -1000, 172, 82, 453, 451, 428, 426, 397, - 463, -1000, -1000, -1000, -1000, -1000, -1000, 166, 303, 107, - 150, 200, 184, 122, 285, 166, 327, 236, 357, 264, - -1000, -1000, 182, -1000, 462, -1000, 281, 260, 250, 244, - 193, 185, 79, 521, 461, -1000, 513, 440, 520, 310, - -1000, -1000, -1000, 298, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 130, -1000, 181, 250, -1000, 80, -1000, + -1000, -1000, -1000, 293, 289, -57, 386, -1000, -1000, 67, + 316, 449, 324, 323, 322, -1000, -1000, 329, 426, 329, + 13, -5, -1000, 329, 329, 329, 329, 329, 329, 329, + 329, 329, 329, 329, 329, 329, 329, -1000, -1000, -1000, + -1000, -1000, 175, -1000, -1000, -1000, -1000, 503, 503, 492, + -1000, 491, -1000, -1000, -1000, -1000, 297, 490, -1000, 524, + 502, 58, -1000, -1000, -1000, 318, -1000, -1000, -1000, -1000, + -1000, 504, 489, 488, 487, 486, 302, 360, 187, 314, + 279, 359, 389, 288, 265, 358, 254, -43, 315, 312, + 305, 296, -31, -31, -13, -13, -24, -24, -24, -24, + -25, -25, -25, -25, -25, -25, 175, 297, 297, 297, + 357, -1000, 377, 357, -1000, -1000, 209, -1000, 356, -1000, + 195, 355, -1000, 67, -1000, 326, 284, 451, 430, 428, + 399, 393, 467, -1000, -1000, -1000, -1000, -1000, -1000, 101, + 314, 78, 114, 183, 125, 110, 258, 101, 329, 252, + 354, 287, -1000, -1000, 281, -1000, 464, -1000, 253, 241, + 232, 158, 278, 175, 230, 503, 463, -1000, 466, 442, + 502, 285, -1000, -1000, -1000, 188, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 220, -1000, 229, 78, 46, 78, 423, 1, - 301, 1, 126, 312, 402, 153, 73, -1000, -1000, 218, - -1000, 327, 519, -1000, -1000, 356, 175, -1000, 162, -1000, - -1000, 151, -1000, 133, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 460, 459, -1000, 166, 46, 78, 46, -1000, -1000, - 185, -1000, 1, -1000, 243, -1000, -1000, -1000, 83, 400, - 398, 124, 166, 211, -1000, 458, -1000, -1000, -1000, -1000, - 181, 179, -1000, 46, -1000, 516, 75, 46, 28, 1, - 1, 390, -1000, -1000, 355, -1000, -1000, 148, 46, -1000, - -1000, 1, 457, -1000, -1000, 354, 441, 142, -1000, + -1000, -1000, -1000, -1000, 244, -1000, 157, 107, 46, 107, + 405, 21, 297, 21, 113, 184, 402, 207, 73, -1000, + -1000, 242, -1000, 329, 494, -1000, -1000, 353, 240, -1000, + 238, -1000, -1000, 199, -1000, 148, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 460, 457, -1000, 101, 46, 107, 46, + 
-1000, -1000, 175, -1000, 21, -1000, 81, -1000, -1000, -1000, + 31, 401, 400, 159, 101, 212, -1000, 456, -1000, -1000, + -1000, -1000, 200, 164, -1000, 46, -1000, 403, 55, 46, + 40, 21, 21, 390, -1000, -1000, 336, -1000, -1000, 126, + 46, -1000, -1000, 21, 455, -1000, -1000, 330, 443, 119, + -1000, } +var exprPgo = [...]int{ -var exprPgo = [...]int16{ - 0, 560, 11, 559, 2, 9, 506, 3, 16, 4, - 558, 557, 556, 555, 15, 554, 553, 552, 551, 550, - 549, 548, 547, 240, 546, 545, 544, 13, 5, 543, - 542, 541, 6, 540, 83, 539, 538, 537, 536, 7, - 535, 8, 534, 531, 14, 530, 1, 529, 519, 0, + 0, 562, 11, 561, 2, 9, 460, 3, 16, 4, + 560, 559, 558, 557, 15, 556, 555, 554, 553, 552, + 551, 550, 549, 465, 548, 547, 546, 13, 5, 545, + 544, 543, 6, 542, 89, 541, 540, 539, 538, 7, + 537, 8, 536, 14, 17, 533, 532, 1, 531, 530, + 0, } +var exprR1 = [...]int{ -var exprR1 = [...]int8{ 0, 1, 2, 2, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 46, 46, 46, 13, 13, 13, 11, 11, 11, 11, + 47, 47, 47, 13, 13, 13, 11, 11, 11, 11, 15, 15, 15, 15, 15, 15, 22, 3, 3, 3, 3, 14, 14, 14, 10, 10, 9, 9, 9, 9, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, - 19, 34, 34, 33, 33, 26, 26, 26, 26, 26, - 43, 35, 36, 41, 41, 42, 42, 42, 40, 32, - 32, 32, 32, 32, 32, 32, 32, 32, 44, 44, - 45, 45, 48, 48, 47, 47, 31, 31, 31, 31, - 31, 31, 31, 29, 29, 29, 29, 29, 29, 29, - 30, 30, 30, 30, 30, 30, 30, 39, 39, 38, - 38, 37, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 24, 24, 25, - 25, 25, 25, 23, 23, 23, 23, 23, 23, 23, - 23, 21, 21, 21, 17, 18, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 12, 12, 12, + 28, 19, 34, 34, 33, 33, 26, 26, 26, 26, + 26, 46, 45, 35, 36, 41, 41, 42, 42, 42, + 40, 32, 32, 32, 32, 32, 32, 32, 32, 32, + 43, 43, 44, 44, 49, 49, 48, 48, 31, 31, + 31, 31, 31, 31, 31, 29, 29, 29, 29, 29, + 29, 29, 30, 30, 30, 30, 30, 30, 30, 39, + 39, 38, 38, 37, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 24, + 24, 25, 25, 25, 25, 23, 23, 23, 23, 23, + 23, 23, 23, 21, 21, 21, 17, 18, 16, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 12, 12, 49, 5, 5, 4, 4, 4, 4, + 12, 12, 12, 12, 50, 5, 5, 4, 4, 4, + 4, } +var exprR2 = [...]int{ -var exprR2 = [...]int8{ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 2, 3, 2, 3, 4, 5, 3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6, @@ -409,22 +411,23 @@ var exprR2 = [...]int8{ 4, 5, 5, 6, 7, 7, 12, 1, 1, 1, 1, 3, 3, 2, 1, 3, 3, 3, 3, 3, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, - 1, 2, 5, 1, 2, 1, 1, 2, 1, 2, - 2, 2, 1, 3, 3, 1, 3, 3, 2, 1, - 1, 1, 1, 3, 2, 3, 3, 3, 3, 1, - 1, 3, 6, 6, 1, 1, 3, 3, 3, 3, + 2, 1, 2, 5, 1, 2, 1, 1, 2, 1, + 2, 2, 2, 2, 1, 3, 3, 1, 3, 3, + 2, 1, 1, 1, 1, 3, 2, 3, 3, 3, + 3, 1, 1, 3, 6, 6, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, - 3, 2, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 0, 1, 5, - 4, 5, 4, 1, 1, 2, 4, 5, 2, 4, - 5, 1, 2, 2, 4, 1, 1, 1, 1, 1, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 3, 2, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, + 1, 5, 4, 5, 4, 1, 1, 2, 4, 5, + 2, 4, 5, 1, 2, 2, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 1, 3, 4, 4, 3, 3, + 1, 1, 1, 1, 2, 1, 3, 4, 4, 3, + 3, } +var exprChk = [...]int{ -var exprChk = [...]int16{ -1000, -1, -2, -6, -7, -14, 23, -11, -15, -20, -21, -22, -17, 15, -12, -16, 7, 85, 86, 65, -18, 27, 28, 29, 41, 42, 51, 52, 53, 54, @@ -436,77 +439,79 @@ var exprChk = [...]int16{ 23, 23, -4, 25, 26, 7, 7, 23, 23, -23, -24, -25, 43, -23, -23, -23, -23, -23, 
-23, -23, -23, -23, -23, -23, -23, -23, -23, -28, -34, -26, - -43, -32, -35, -36, -40, -37, 44, 46, 45, 66, - 68, -9, -48, -47, -30, 23, 48, 74, 49, 75, - 5, -31, -29, 6, -19, 69, 24, 24, 16, 2, - 19, 12, 80, 13, 14, -8, 7, -14, 23, -7, - 7, 23, 23, 23, -7, 7, -2, 70, 71, 72, - 73, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, -32, 77, 19, 76, -45, - -44, 5, 6, 6, -32, 6, -42, -41, 5, -38, - -39, 5, -9, 12, 80, 83, 84, 81, 82, 79, - 23, -9, 6, 6, 6, 6, 2, 24, 19, 9, - -46, -27, 47, -14, -8, 24, 19, -7, 7, -5, - 24, 5, -5, 24, 19, 24, 23, 23, 23, 23, - -32, -32, -32, 19, 12, 24, 19, 12, 19, 69, - 8, 4, 7, 69, 8, 4, 7, 8, 4, 7, - 8, 4, 7, 8, 4, 7, 8, 4, 7, 8, - 4, 7, 6, -4, -8, -49, -46, -27, 67, 9, - 47, 9, -46, 50, 24, -46, -27, 24, -4, -7, - 24, 19, 19, 24, 24, 6, -5, 24, -5, 24, - 24, -5, 24, -5, -44, 6, -41, 2, 5, 6, - -39, 23, 23, 24, 24, -46, -27, -46, 8, -49, - -32, -49, 9, 5, -13, 58, 59, 60, 9, 24, - 24, -46, 24, -7, 5, 19, 24, 24, 24, 24, - 6, 6, -4, -46, -49, 23, -49, -46, 47, 9, - 9, 24, -4, 24, 6, 24, 24, 5, -46, -49, - -49, 9, 19, 24, -49, 6, 19, 6, 24, + -46, -45, -32, -35, -36, -40, -37, 44, 46, 45, + 66, 68, -9, -49, -48, -30, 23, 48, 74, 49, + 75, 5, -31, -29, 6, -19, 69, 24, 24, 16, + 2, 19, 12, 80, 13, 14, -8, 7, -14, 23, + -7, 7, 23, 23, 23, -7, 7, -2, 70, 71, + 72, 73, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -32, 77, 19, 76, + -44, -43, 5, -44, 6, 6, -32, 6, -42, -41, + 5, -38, -39, 5, -9, 12, 80, 83, 84, 81, + 82, 79, 23, -9, 6, 6, 6, 6, 2, 24, + 19, 9, -47, -27, 47, -14, -8, 24, 19, -7, + 7, -5, 24, 5, -5, 24, 19, 24, 23, 23, + 23, 23, -32, -32, -32, 19, 12, 24, 19, 12, + 19, 69, 8, 4, 7, 69, 8, 4, 7, 8, + 4, 7, 8, 4, 7, 8, 4, 7, 8, 4, + 7, 8, 4, 7, 6, -4, -8, -50, -47, -27, + 67, 9, 47, 9, -47, 50, 24, -47, -27, 24, + -4, -7, 24, 19, 19, 24, 24, 6, -5, 24, + -5, 24, 24, -5, 24, -5, -43, 6, -41, 2, + 5, 6, -39, 23, 23, 24, 24, -47, -27, -47, + 8, -50, -32, -50, 9, 5, -13, 58, 59, 60, + 9, 24, 24, -47, 24, -7, 5, 19, 24, 24, + 24, 24, 6, 6, -4, -47, -50, 23, -50, -47, + 47, 9, 9, 24, -4, 24, 6, 24, 24, 5, + -47, -50, -50, 9, 19, 24, -50, 6, 19, 6, + 24, } +var exprDef = [...]int{ -var exprDef = [...]int16{ 0, -2, 1, 2, 3, 11, 0, 4, 5, 6, - 7, 8, 9, 0, 0, 0, 171, 0, 0, 0, - 0, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 176, 177, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 175, 157, 157, - 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, - 157, 157, 157, 12, 70, 72, 0, 83, 0, 57, + 7, 8, 9, 0, 0, 0, 173, 0, 0, 0, + 0, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 177, 159, 159, + 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, + 159, 159, 159, 12, 70, 72, 0, 84, 0, 57, 58, 59, 60, 3, 2, 0, 0, 63, 64, 0, - 0, 0, 0, 0, 0, 172, 173, 0, 0, 0, - 163, 164, 158, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 71, 84, 73, - 74, 75, 76, 77, 78, 79, 85, 86, 0, 88, - 0, 99, 100, 101, 102, 0, 0, 92, 0, 0, - 0, 114, 115, 81, 0, 80, 10, 13, 61, 62, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, - 171, 0, 0, 0, 3, 0, 142, 0, 0, 165, - 168, 143, 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 104, 0, 0, 0, 90, - 110, 109, 87, 89, 0, 91, 98, 95, 0, 141, - 139, 137, 138, 0, 0, 0, 0, 0, 0, 0, - 0, 65, 66, 67, 68, 69, 39, 46, 0, 14, - 0, 0, 0, 0, 0, 50, 0, 3, 171, 0, - 207, 203, 0, 208, 0, 174, 0, 0, 0, 0, - 105, 106, 107, 0, 0, 103, 0, 0, 0, 0, - 121, 128, 135, 0, 120, 127, 134, 116, 123, 130, - 117, 124, 131, 118, 125, 132, 119, 
126, 133, 122, - 129, 136, 0, 48, 0, 15, 18, 34, 0, 22, - 0, 26, 0, 0, 0, 0, 0, 38, 52, 3, - 51, 0, 0, 205, 206, 0, 0, 160, 0, 162, - 166, 0, 169, 0, 111, 108, 96, 97, 93, 94, - 140, 0, 0, 82, 47, 19, 35, 36, 202, 23, - 42, 27, 30, 40, 0, 43, 44, 45, 16, 0, - 0, 0, 53, 3, 204, 0, 159, 161, 167, 170, - 0, 0, 49, 37, 31, 0, 17, 20, 0, 24, - 28, 0, 54, 55, 0, 112, 113, 0, 21, 25, - 29, 32, 0, 41, 33, 0, 0, 0, 56, + 0, 0, 0, 0, 0, 174, 175, 0, 0, 0, + 165, 166, 160, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 71, 85, 73, + 74, 75, 76, 77, 78, 79, 80, 86, 87, 0, + 89, 0, 101, 102, 103, 104, 0, 0, 94, 0, + 0, 0, 116, 117, 82, 0, 81, 10, 13, 61, + 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 173, 0, 0, 0, 3, 0, 144, 0, 0, + 167, 170, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 106, 0, 0, 0, + 91, 112, 111, 92, 88, 90, 0, 93, 100, 97, + 0, 143, 141, 139, 140, 0, 0, 0, 0, 0, + 0, 0, 0, 65, 66, 67, 68, 69, 39, 46, + 0, 14, 0, 0, 0, 0, 0, 50, 0, 3, + 173, 0, 209, 205, 0, 210, 0, 176, 0, 0, + 0, 0, 107, 108, 109, 0, 0, 105, 0, 0, + 0, 0, 123, 130, 137, 0, 122, 129, 136, 118, + 125, 132, 119, 126, 133, 120, 127, 134, 121, 128, + 135, 124, 131, 138, 0, 48, 0, 15, 18, 34, + 0, 22, 0, 26, 0, 0, 0, 0, 0, 38, + 52, 3, 51, 0, 0, 207, 208, 0, 0, 162, + 0, 164, 168, 0, 171, 0, 113, 110, 98, 99, + 95, 96, 142, 0, 0, 83, 47, 19, 35, 36, + 204, 23, 42, 27, 30, 40, 0, 43, 44, 45, + 16, 0, 0, 0, 53, 3, 206, 0, 161, 163, + 169, 172, 0, 0, 49, 37, 31, 0, 17, 20, + 0, 24, 28, 0, 54, 55, 0, 114, 115, 0, + 21, 25, 29, 32, 0, 41, 33, 0, 0, 0, + 56, } +var exprTok1 = [...]int{ -var exprTok1 = [...]int8{ 1, } +var exprTok2 = [...]int{ -var exprTok2 = [...]int8{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -517,8 +522,7 @@ var exprTok2 = [...]int8{ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, } - -var exprTok3 = [...]int8{ +var exprTok3 = [...]int{ 0, } @@ -528,7 +532,6 @@ var exprErrorMessages = [...]struct { msg string }{} -//line yaccpar:1 /* parser for yacc output */ @@ -600,9 +603,9 @@ func exprErrorMessage(state, lookAhead int) string { expected := make([]int, 0, 4) // Look for shiftable tokens. - base := int(exprPact[state]) + base := exprPact[state] for tok := TOKSTART; tok-1 < len(exprToknames); tok++ { - if n := base + tok; n >= 0 && n < exprLast && int(exprChk[int(exprAct[n])]) == tok { + if n := base + tok; n >= 0 && n < exprLast && exprChk[exprAct[n]] == tok { if len(expected) == cap(expected) { return res } @@ -612,13 +615,13 @@ func exprErrorMessage(state, lookAhead int) string { if exprDef[state] == -2 { i := 0 - for exprExca[i] != -1 || int(exprExca[i+1]) != state { + for exprExca[i] != -1 || exprExca[i+1] != state { i += 2 } // Look for tokens that we accept or reduce. 
for i += 2; exprExca[i] >= 0; i += 2 { - tok := int(exprExca[i]) + tok := exprExca[i] if tok < TOKSTART || exprExca[i+1] == 0 { continue } @@ -649,30 +652,30 @@ func exprlex1(lex exprLexer, lval *exprSymType) (char, token int) { token = 0 char = lex.Lex(lval) if char <= 0 { - token = int(exprTok1[0]) + token = exprTok1[0] goto out } if char < len(exprTok1) { - token = int(exprTok1[char]) + token = exprTok1[char] goto out } if char >= exprPrivate { if char < exprPrivate+len(exprTok2) { - token = int(exprTok2[char-exprPrivate]) + token = exprTok2[char-exprPrivate] goto out } } for i := 0; i < len(exprTok3); i += 2 { - token = int(exprTok3[i+0]) + token = exprTok3[i+0] if token == char { - token = int(exprTok3[i+1]) + token = exprTok3[i+1] goto out } } out: if token == 0 { - token = int(exprTok2[1]) /* unknown char */ + token = exprTok2[1] /* unknown char */ } if exprDebug >= 3 { __yyfmt__.Printf("lex %s(%d)\n", exprTokname(token), uint(char)) @@ -727,7 +730,7 @@ exprstack: exprS[exprp].yys = exprstate exprnewstate: - exprn = int(exprPact[exprstate]) + exprn = exprPact[exprstate] if exprn <= exprFlag { goto exprdefault /* simple state */ } @@ -738,8 +741,8 @@ exprnewstate: if exprn < 0 || exprn >= exprLast { goto exprdefault } - exprn = int(exprAct[exprn]) - if int(exprChk[exprn]) == exprtoken { /* valid shift */ + exprn = exprAct[exprn] + if exprChk[exprn] == exprtoken { /* valid shift */ exprrcvr.char = -1 exprtoken = -1 exprVAL = exprrcvr.lval @@ -752,7 +755,7 @@ exprnewstate: exprdefault: /* default state action */ - exprn = int(exprDef[exprstate]) + exprn = exprDef[exprstate] if exprn == -2 { if exprrcvr.char < 0 { exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) @@ -761,18 +764,18 @@ exprdefault: /* look through exception table */ xi := 0 for { - if exprExca[xi+0] == -1 && int(exprExca[xi+1]) == exprstate { + if exprExca[xi+0] == -1 && exprExca[xi+1] == exprstate { break } xi += 2 } for xi += 2; ; xi += 2 { - exprn = int(exprExca[xi+0]) + exprn = exprExca[xi+0] if exprn < 0 || exprn == exprtoken { break } } - exprn = int(exprExca[xi+1]) + exprn = exprExca[xi+1] if exprn < 0 { goto ret0 } @@ -794,10 +797,10 @@ exprdefault: /* find a state where "error" is a legal shift action */ for exprp >= 0 { - exprn = int(exprPact[exprS[exprp].yys]) + exprErrCode + exprn = exprPact[exprS[exprp].yys] + exprErrCode if exprn >= 0 && exprn < exprLast { - exprstate = int(exprAct[exprn]) /* simulate a shift of "error" */ - if int(exprChk[exprstate]) == exprErrCode { + exprstate = exprAct[exprn] /* simulate a shift of "error" */ + if exprChk[exprstate] == exprErrCode { goto exprstack } } @@ -833,7 +836,7 @@ exprdefault: exprpt := exprp _ = exprpt // guard against "declared and not used" - exprp -= int(exprR2[exprn]) + exprp -= exprR2[exprn] // exprp is now the index of $0. Perform the default action. Iff the // reduced production is ε, $1 is possibly out of range. 
if exprp+1 >= len(exprS) { @@ -844,16 +847,16 @@ exprdefault: exprVAL = exprS[exprp+1] /* consult goto table to find next state */ - exprn = int(exprR1[exprn]) - exprg := int(exprPgo[exprn]) + exprn = exprR1[exprn] + exprg := exprPgo[exprn] exprj := exprg + exprS[exprp].yys + 1 if exprj >= exprLast { - exprstate = int(exprAct[exprg]) + exprstate = exprAct[exprg] } else { - exprstate = int(exprAct[exprj]) - if int(exprChk[exprstate]) != -exprn { - exprstate = int(exprAct[exprg]) + exprstate = exprAct[exprj] + if exprChk[exprstate] != -exprn { + exprstate = exprAct[exprg] } } // dummy call; replaced with literal code @@ -861,1248 +864,1052 @@ exprdefault: case 1: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:138 { exprlex.(*parser).expr = exprDollar[1].Expr } case 2: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:141 { exprVAL.Expr = exprDollar[1].LogExpr } case 3: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:142 { exprVAL.Expr = exprDollar[1].MetricExpr } case 4: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:146 { exprVAL.MetricExpr = exprDollar[1].RangeAggregationExpr } case 5: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:147 { exprVAL.MetricExpr = exprDollar[1].VectorAggregationExpr } case 6: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:148 { exprVAL.MetricExpr = exprDollar[1].BinOpExpr } case 7: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:149 { exprVAL.MetricExpr = exprDollar[1].LiteralExpr } case 8: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:150 { exprVAL.MetricExpr = exprDollar[1].LabelReplaceExpr } case 9: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:151 { exprVAL.MetricExpr = exprDollar[1].VectorExpr } case 10: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:152 { exprVAL.MetricExpr = exprDollar[2].MetricExpr } case 11: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:156 { exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector) } case 12: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:157 { exprVAL.LogExpr = newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr) } case 13: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:158 { exprVAL.LogExpr = exprDollar[2].LogExpr } case 14: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:162 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, nil) } case 15: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:163 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) } case 16: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:164 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, nil) } case 17: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:165 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, exprDollar[5].OffsetExpr) } case 18: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:166 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[3].UnwrapExpr, nil) } case 19: exprDollar = 
exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:167 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[4].UnwrapExpr, exprDollar[3].OffsetExpr) } case 20: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:168 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[5].UnwrapExpr, nil) } case 21: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:169 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[6].UnwrapExpr, exprDollar[5].OffsetExpr) } case 22: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:170 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, nil) } case 23: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:171 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, exprDollar[4].OffsetExpr) } case 24: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:172 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, nil) } case 25: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:173 { exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, exprDollar[6].OffsetExpr) } case 26: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:174 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, nil) } case 27: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:175 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, exprDollar[4].OffsetExpr) } case 28: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:176 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, nil) } case 29: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:177 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, exprDollar[6].OffsetExpr) } case 30: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:178 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, nil) } case 31: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:179 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, exprDollar[5].OffsetExpr) } case 32: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:180 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, nil) } case 33: exprDollar = exprS[exprpt-7 : exprpt+1] -//line pkg/logql/syntax/expr.y:181 { exprVAL.LogRangeExpr = 
newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, exprDollar[7].OffsetExpr) } case 34: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:182 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, nil, nil) } case 35: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:183 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) } case 36: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:184 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, exprDollar[4].UnwrapExpr, nil) } case 37: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:185 { exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, exprDollar[5].UnwrapExpr, exprDollar[3].OffsetExpr) } case 38: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:186 { exprVAL.LogRangeExpr = exprDollar[2].LogRangeExpr } case 40: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:191 { exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[3].str, "") } case 41: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:192 { exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[5].str, exprDollar[3].ConvOp) } case 42: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:193 { exprVAL.UnwrapExpr = exprDollar[1].UnwrapExpr.addPostFilter(exprDollar[3].LabelFilter) } case 43: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:197 { exprVAL.ConvOp = OpConvBytes } case 44: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:198 { exprVAL.ConvOp = OpConvDuration } case 45: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:199 { exprVAL.ConvOp = OpConvDurationSeconds } case 46: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:203 { exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, nil, nil) } case 47: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:204 { exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, nil, &exprDollar[3].str) } case 48: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:205 { exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[5].Grouping, nil) } case 49: exprDollar = exprS[exprpt-7 : exprpt+1] -//line pkg/logql/syntax/expr.y:206 { exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[7].Grouping, &exprDollar[3].str) } case 50: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:211 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, nil, nil) } case 51: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:212 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil) } 
case 52: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:213 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil) } case 53: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:215 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str) } case 54: exprDollar = exprS[exprpt-7 : exprpt+1] -//line pkg/logql/syntax/expr.y:216 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str) } case 55: exprDollar = exprS[exprpt-7 : exprpt+1] -//line pkg/logql/syntax/expr.y:217 { exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[6].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, &exprDollar[4].str) } case 56: exprDollar = exprS[exprpt-12 : exprpt+1] -//line pkg/logql/syntax/expr.y:222 { exprVAL.LabelReplaceExpr = mustNewLabelReplaceExpr(exprDollar[3].MetricExpr, exprDollar[5].str, exprDollar[7].str, exprDollar[9].str, exprDollar[11].str) } case 57: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:226 { exprVAL.Filter = labels.MatchRegexp } case 58: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:227 { exprVAL.Filter = labels.MatchEqual } case 59: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:228 { exprVAL.Filter = labels.MatchNotRegexp } case 60: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:229 { exprVAL.Filter = labels.MatchNotEqual } case 61: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:233 { exprVAL.Selector = exprDollar[2].Matchers } case 62: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:234 { exprVAL.Selector = exprDollar[2].Matchers } case 63: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:235 { } case 64: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:239 { exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher} } case 65: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:240 { exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher) } case 66: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:244 { exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str) } case 67: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:245 { exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str) } case 68: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:246 { exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str) } case 69: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:247 { exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str) } case 70: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:251 { exprVAL.PipelineExpr = MultiStageExpr{exprDollar[1].PipelineStage} } case 71: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:252 { exprVAL.PipelineExpr = append(exprDollar[1].PipelineExpr, exprDollar[2].PipelineStage) } case 72: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:256 { exprVAL.PipelineStage = exprDollar[1].LineFilters } case 
73: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:257 { exprVAL.PipelineStage = exprDollar[2].LabelParser } case 74: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:258 { exprVAL.PipelineStage = exprDollar[2].JSONExpressionParser } case 75: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:259 { - exprVAL.PipelineStage = &LabelFilterExpr{LabelFilterer: exprDollar[2].LabelFilter} + exprVAL.PipelineStage = exprDollar[2].LogfmtExpressionParser } case 76: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:260 { - exprVAL.PipelineStage = exprDollar[2].LineFormatExpr + exprVAL.PipelineStage = &LabelFilterExpr{LabelFilterer: exprDollar[2].LabelFilter} } case 77: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:261 { - exprVAL.PipelineStage = exprDollar[2].DecolorizeExpr + exprVAL.PipelineStage = exprDollar[2].LineFormatExpr } case 78: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:262 { - exprVAL.PipelineStage = exprDollar[2].LabelFormatExpr + exprVAL.PipelineStage = exprDollar[2].DecolorizeExpr } case 79: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:263 { - exprVAL.PipelineStage = exprDollar[2].DropLabelsExpr + exprVAL.PipelineStage = exprDollar[2].LabelFormatExpr } case 80: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + exprVAL.PipelineStage = exprDollar[2].DropLabelsExpr + } + case 81: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:267 { exprVAL.FilterOp = OpFilterIP } - case 81: + case 82: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:271 { exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str) } - case 82: + case 83: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:272 { exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, exprDollar[2].FilterOp, exprDollar[4].str) } - case 83: + case 84: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:276 { exprVAL.LineFilters = exprDollar[1].LineFilter } - case 84: + case 85: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:277 { exprVAL.LineFilters = newNestedLineFilterExpr(exprDollar[1].LineFilters, exprDollar[2].LineFilter) } - case 85: + case 86: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:281 { exprVAL.LabelParser = newLabelParserExpr(OpParserTypeJSON, "") } - case 86: + case 87: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:282 { exprVAL.LabelParser = newLabelParserExpr(OpParserTypeLogfmt, "") } - case 87: + case 88: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:283 { exprVAL.LabelParser = newLabelParserExpr(OpParserTypeRegexp, exprDollar[2].str) } - case 88: + case 89: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:284 { exprVAL.LabelParser = newLabelParserExpr(OpParserTypeUnpack, "") } - case 89: + case 90: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:285 { exprVAL.LabelParser = newLabelParserExpr(OpParserTypePattern, exprDollar[2].str) } - case 90: + case 91: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:289 { - exprVAL.JSONExpressionParser = newJSONExpressionParser(exprDollar[2].JSONExpressionList) + exprVAL.JSONExpressionParser = newJSONExpressionParser(exprDollar[2].LabelExtractionExpressionList) } - case 91: + case 92: + exprDollar = exprS[exprpt-2 : exprpt+1] + { + 
exprVAL.LogfmtExpressionParser = newLogfmtExpressionParser(exprDollar[2].LabelExtractionExpressionList) + } + case 93: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:291 { exprVAL.LineFormatExpr = newLineFmtExpr(exprDollar[2].str) } - case 92: + case 94: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:293 { exprVAL.DecolorizeExpr = newDecolorizeExpr() } - case 93: + case 95: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:296 { exprVAL.LabelFormat = log.NewRenameLabelFmt(exprDollar[1].str, exprDollar[3].str) } - case 94: + case 96: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:297 { exprVAL.LabelFormat = log.NewTemplateLabelFmt(exprDollar[1].str, exprDollar[3].str) } - case 95: + case 97: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:301 { exprVAL.LabelsFormat = []log.LabelFmt{exprDollar[1].LabelFormat} } - case 96: + case 98: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:302 { exprVAL.LabelsFormat = append(exprDollar[1].LabelsFormat, exprDollar[3].LabelFormat) } - case 98: + case 100: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:307 { exprVAL.LabelFormatExpr = newLabelFmtExpr(exprDollar[2].LabelsFormat) } - case 99: + case 101: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:310 { exprVAL.LabelFilter = log.NewStringLabelFilter(exprDollar[1].Matcher) } - case 100: + case 102: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:311 { exprVAL.LabelFilter = exprDollar[1].IPLabelFilter } - case 101: + case 103: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:312 { exprVAL.LabelFilter = exprDollar[1].UnitFilter } - case 102: + case 104: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:313 { exprVAL.LabelFilter = exprDollar[1].NumberFilter } - case 103: + case 105: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:314 { exprVAL.LabelFilter = exprDollar[2].LabelFilter } - case 104: + case 106: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:315 { exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[2].LabelFilter) } - case 105: + case 107: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:316 { exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) } - case 106: + case 108: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:317 { exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) } - case 107: + case 109: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:318 { exprVAL.LabelFilter = log.NewOrLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) } - case 108: + case 110: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:322 { - exprVAL.JSONExpression = log.NewJSONExpr(exprDollar[1].str, exprDollar[3].str) + exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[3].str) } - case 109: + case 111: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:323 { - exprVAL.JSONExpression = log.NewJSONExpr(exprDollar[1].str, exprDollar[1].str) + exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[1].str) } - case 110: + case 112: exprDollar = exprS[exprpt-1 : exprpt+1] -//line 
pkg/logql/syntax/expr.y:326 { - exprVAL.JSONExpressionList = []log.JSONExpression{exprDollar[1].JSONExpression} + exprVAL.LabelExtractionExpressionList = []log.LabelExtractionExpr{exprDollar[1].LabelExtractionExpression} } - case 111: + case 113: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:327 { - exprVAL.JSONExpressionList = append(exprDollar[1].JSONExpressionList, exprDollar[3].JSONExpression) + exprVAL.LabelExtractionExpressionList = append(exprDollar[1].LabelExtractionExpressionList, exprDollar[3].LabelExtractionExpression) } - case 112: + case 114: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:331 { exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterEqual) } - case 113: + case 115: exprDollar = exprS[exprpt-6 : exprpt+1] -//line pkg/logql/syntax/expr.y:332 { exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterNotEqual) } - case 114: + case 116: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:336 { exprVAL.UnitFilter = exprDollar[1].DurationFilter } - case 115: + case 117: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:337 { exprVAL.UnitFilter = exprDollar[1].BytesFilter } - case 116: + case 118: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:340 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].duration) } - case 117: + case 119: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:341 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].duration) } - case 118: + case 120: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:342 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].duration) } - case 119: + case 121: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:343 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].duration) } - case 120: + case 122: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:344 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].duration) } - case 121: + case 123: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:345 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) } - case 122: + case 124: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:346 { exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) } - case 123: + case 125: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:350 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].bytes) } - case 124: + case 126: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:351 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) } - case 125: + case 127: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:352 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].bytes) } - case 126: + case 
128: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:353 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) } - case 127: + case 129: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:354 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].bytes) } - case 128: + case 130: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:355 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) } - case 129: + case 131: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:356 { exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) } - case 130: + case 132: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:360 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 131: + case 133: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:361 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 132: + case 134: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:362 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 133: + case 135: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:363 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 134: + case 136: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:364 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 135: + case 137: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:365 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 136: + case 138: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:366 { exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str)) } - case 137: + case 139: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:370 { exprVAL.DropLabel = log.NewDropLabel(nil, exprDollar[1].str) } - case 138: + case 140: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:371 { exprVAL.DropLabel = log.NewDropLabel(exprDollar[1].Matcher, "") } - case 139: + case 141: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:374 { exprVAL.DropLabels = []log.DropLabel{exprDollar[1].DropLabel} } - case 140: + case 142: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:375 { exprVAL.DropLabels = append(exprDollar[1].DropLabels, exprDollar[3].DropLabel) } - case 141: + case 143: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:378 { exprVAL.DropLabelsExpr = newDropLabelsExpr(exprDollar[2].DropLabels) } - case 142: + case 144: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:382 { exprVAL.BinOpExpr = mustNewBinOpExpr("or", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 143: 
+ case 145: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:383 { exprVAL.BinOpExpr = mustNewBinOpExpr("and", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 144: + case 146: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:384 { exprVAL.BinOpExpr = mustNewBinOpExpr("unless", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 145: + case 147: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:385 { exprVAL.BinOpExpr = mustNewBinOpExpr("+", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 146: + case 148: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:386 { exprVAL.BinOpExpr = mustNewBinOpExpr("-", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 147: + case 149: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:387 { exprVAL.BinOpExpr = mustNewBinOpExpr("*", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 148: + case 150: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:388 { exprVAL.BinOpExpr = mustNewBinOpExpr("/", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 149: + case 151: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:389 { exprVAL.BinOpExpr = mustNewBinOpExpr("%", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 150: + case 152: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:390 { exprVAL.BinOpExpr = mustNewBinOpExpr("^", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 151: + case 153: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:391 { exprVAL.BinOpExpr = mustNewBinOpExpr("==", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 152: + case 154: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:392 { exprVAL.BinOpExpr = mustNewBinOpExpr("!=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 153: + case 155: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:393 { exprVAL.BinOpExpr = mustNewBinOpExpr(">", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 154: + case 156: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:394 { exprVAL.BinOpExpr = mustNewBinOpExpr(">=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 155: + case 157: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:395 { exprVAL.BinOpExpr = mustNewBinOpExpr("<", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 156: + case 158: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:396 { exprVAL.BinOpExpr = mustNewBinOpExpr("<=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) } - case 157: + case 159: exprDollar = exprS[exprpt-0 : exprpt+1] -//line pkg/logql/syntax/expr.y:400 { exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} } - case 158: + case 160: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:404 { exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true} } - case 159: + case 161: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:411 { exprVAL.OnOrIgnoringModifier = 
exprDollar[1].BoolModifier exprVAL.OnOrIgnoringModifier.VectorMatching.On = true exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels } - case 160: + case 162: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:417 { exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier exprVAL.OnOrIgnoringModifier.VectorMatching.On = true } - case 161: + case 163: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:422 { exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels } - case 162: + case 164: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:427 { exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier } - case 163: + case 165: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:433 { exprVAL.BinOpModifier = exprDollar[1].BoolModifier } - case 164: + case 166: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:434 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier } - case 165: + case 167: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:436 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne } - case 166: + case 168: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:441 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne } - case 167: + case 169: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:446 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels } - case 168: + case 170: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:452 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany } - case 169: + case 171: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:457 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany } - case 170: + case 172: exprDollar = exprS[exprpt-5 : exprpt+1] -//line pkg/logql/syntax/expr.y:462 { exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels } - case 171: + case 173: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:470 { exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false) } - case 172: + case 174: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:471 { exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false) } - case 173: + case 175: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:472 { exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, true) } - case 174: + case 176: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:476 { exprVAL.VectorExpr = NewVectorExpr(exprDollar[3].str) } - case 175: + case 177: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:479 { exprVAL.Vector = OpTypeVector } - case 176: + case 178: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:483 { exprVAL.VectorOp = OpTypeSum } - case 177: + case 179: exprDollar = 
exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:484 { exprVAL.VectorOp = OpTypeAvg } - case 178: + case 180: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:485 { exprVAL.VectorOp = OpTypeCount } - case 179: + case 181: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:486 { exprVAL.VectorOp = OpTypeMax } - case 180: + case 182: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:487 { exprVAL.VectorOp = OpTypeMin } - case 181: + case 183: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:488 { exprVAL.VectorOp = OpTypeStddev } - case 182: + case 184: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:489 { exprVAL.VectorOp = OpTypeStdvar } - case 183: + case 185: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:490 { exprVAL.VectorOp = OpTypeBottomK } - case 184: + case 186: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:491 { exprVAL.VectorOp = OpTypeTopK } - case 185: + case 187: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:492 { exprVAL.VectorOp = OpTypeSort } - case 186: + case 188: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:493 { exprVAL.VectorOp = OpTypeSortDesc } - case 187: + case 189: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:497 { exprVAL.RangeOp = OpRangeTypeCount } - case 188: + case 190: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:498 { exprVAL.RangeOp = OpRangeTypeRate } - case 189: + case 191: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:499 { exprVAL.RangeOp = OpRangeTypeRateCounter } - case 190: + case 192: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:500 { exprVAL.RangeOp = OpRangeTypeBytes } - case 191: + case 193: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:501 { exprVAL.RangeOp = OpRangeTypeBytesRate } - case 192: + case 194: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:502 { exprVAL.RangeOp = OpRangeTypeAvg } - case 193: + case 195: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:503 { exprVAL.RangeOp = OpRangeTypeSum } - case 194: + case 196: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:504 { exprVAL.RangeOp = OpRangeTypeMin } - case 195: + case 197: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:505 { exprVAL.RangeOp = OpRangeTypeMax } - case 196: + case 198: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:506 { exprVAL.RangeOp = OpRangeTypeStdvar } - case 197: + case 199: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:507 { exprVAL.RangeOp = OpRangeTypeStddev } - case 198: + case 200: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:508 { exprVAL.RangeOp = OpRangeTypeQuantile } - case 199: + case 201: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:509 { exprVAL.RangeOp = OpRangeTypeFirst } - case 200: + case 202: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:510 { exprVAL.RangeOp = OpRangeTypeLast } - case 201: + case 203: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:511 { exprVAL.RangeOp = OpRangeTypeAbsent } - case 202: + case 204: exprDollar = exprS[exprpt-2 : exprpt+1] -//line pkg/logql/syntax/expr.y:515 { exprVAL.OffsetExpr = 
newOffsetExpr(exprDollar[2].duration) } - case 203: + case 205: exprDollar = exprS[exprpt-1 : exprpt+1] -//line pkg/logql/syntax/expr.y:518 { exprVAL.Labels = []string{exprDollar[1].str} } - case 204: + case 206: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:519 { exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str) } - case 205: + case 207: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:523 { exprVAL.Grouping = &Grouping{Without: false, Groups: exprDollar[3].Labels} } - case 206: + case 208: exprDollar = exprS[exprpt-4 : exprpt+1] -//line pkg/logql/syntax/expr.y:524 { exprVAL.Grouping = &Grouping{Without: true, Groups: exprDollar[3].Labels} } - case 207: + case 209: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:525 { exprVAL.Grouping = &Grouping{Without: false, Groups: nil} } - case 208: + case 210: exprDollar = exprS[exprpt-3 : exprpt+1] -//line pkg/logql/syntax/expr.y:526 { exprVAL.Grouping = &Grouping{Without: true, Groups: nil} } diff --git a/pkg/logql/syntax/lex_test.go b/pkg/logql/syntax/lex_test.go index 45d60f354169..f96742284658 100644 --- a/pkg/logql/syntax/lex_test.go +++ b/pkg/logql/syntax/lex_test.go @@ -85,6 +85,7 @@ func TestLex(t *testing.T) { # |~ "\\w+" | json`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, JSON}}, {`{foo="bar"} | json code="response.code", param="request.params[0]"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, JSON, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, + {`{foo="bar"} | logfmt code="response.code", IPAddress="host"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, {`decolorize`, []int{DECOLORIZE}}, } { t.Run(tc.input, func(t *testing.T) { diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go index 35021452b929..93835afc67fc 100644 --- a/pkg/logql/syntax/parser.go +++ b/pkg/logql/syntax/parser.go @@ -103,19 +103,12 @@ func ParseExprWithoutValidation(input string) (expr Expr, err error) { func validateExpr(expr Expr) error { switch e := expr.(type) { case SampleExpr: - err := validateSampleExpr(e) - if err != nil { - return err - } + return validateSampleExpr(e) case LogSelectorExpr: - err := validateMatchers(e.Matchers()) - if err != nil { - return err - } + return validateLogSelectorExpression(e) default: return logqlmodel.NewParseError(fmt.Sprintf("unexpected expression type: %v", e), 0, 0) } - return nil } // validateMatchers checks whether a query would touch all the streams in the query range or uses at least one matcher to select specific streams. 
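The grammar and lexer changes above add a logfmt expression parser next to the existing json one: `| logfmt` may now be followed by a list of label-extraction expressions (bare keys, or label="key" renames), tokenized as in the lex_test.go case added here. A minimal sketch of how the new form parses, assuming the syntax package is imported as a library under the file paths shown in this diff:

    package main

    import (
        "fmt"

        "github.com/grafana/loki/pkg/logql/syntax"
    )

    func main() {
        // Bare `| logfmt` still extracts every key/value pair; the expression
        // form keeps only the listed labels and can rename them, mirroring
        // the existing `| json` expression parser.
        expr, err := syntax.ParseExpr(`{app="foo"} | logfmt msg, err="error"`)
        if err != nil {
            panic(err)
        }
        fmt.Println(expr) // the parsed pipeline, carrying a LogfmtExpressionParser stage
    }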
@@ -169,7 +162,6 @@ func validateSampleExpr(expr SampleExpr) error { return err } return validateSampleExpr(e.RHS) - case *LiteralExpr: if e.err != nil { return e.err @@ -195,7 +187,16 @@ func validateSampleExpr(expr SampleExpr) error { if err != nil { return err } - return validateMatchers(selector.Matchers()) + return validateLogSelectorExpression(selector) + } +} + +func validateLogSelectorExpression(expr LogSelectorExpr) error { + switch e := expr.(type) { + case *VectorExpr: + return nil + default: + return validateMatchers(e.Matchers()) } } diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go index ee7d070f9b83..37fe5ea436b7 100644 --- a/pkg/logql/syntax/parser_test.go +++ b/pkg/logql/syntax/parser_test.go @@ -2867,6 +2867,23 @@ func TestParse(t *testing.T) { in: `vector(abc)`, err: logqlmodel.NewParseError("syntax error: unexpected IDENTIFIER, expecting NUMBER", 1, 8), }, + { + in: `vector(1)`, + exp: &VectorExpr{Val: 1, err: nil}, + }, + { + in: `label_replace(vector(0), "foo", "bar", "", "")`, + exp: mustNewLabelReplaceExpr(&VectorExpr{Val: 0, err: nil}, "foo", "bar", "", ""), + }, + { + in: `sum(vector(0))`, + exp: &VectorAggregationExpr{ + Left: &VectorExpr{Val: 0, err: nil}, + Grouping: &Grouping{}, + Params: 0, + Operation: "sum", + }, + }, { in: `{app="foo"} # |= "bar" @@ -2937,8 +2954,8 @@ func TestParse(t *testing.T) { exp: &PipelineExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), MultiStages: MultiStageExpr{ - newJSONExpressionParser([]log.JSONExpression{ - log.NewJSONExpr("bob", `top.sub["index"]`), + newJSONExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("bob", `top.sub["index"]`), }), }, }, @@ -2948,8 +2965,8 @@ func TestParse(t *testing.T) { exp: &PipelineExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), MultiStages: MultiStageExpr{ - newJSONExpressionParser([]log.JSONExpression{ - log.NewJSONExpr("bob", `top.params[0]`), + newJSONExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("bob", `top.params[0]`), }), }, }, @@ -2959,9 +2976,9 @@ func TestParse(t *testing.T) { exp: &PipelineExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), MultiStages: MultiStageExpr{ - newJSONExpressionParser([]log.JSONExpression{ - log.NewJSONExpr("response_code", `response.code`), - log.NewJSONExpr("api_key", `request.headers["X-API-KEY"]`), + newJSONExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("response_code", `response.code`), + log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`), }), }, }, @@ -2971,10 +2988,10 @@ func TestParse(t *testing.T) { exp: &PipelineExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), MultiStages: MultiStageExpr{ - newJSONExpressionParser([]log.JSONExpression{ - log.NewJSONExpr("response_code", `response_code`), - log.NewJSONExpr("api_key", `request.headers["X-API-KEY"]`), - log.NewJSONExpr("layer7_something_specific", `layer7_something_specific`), + newJSONExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("response_code", `response_code`), + log.NewLabelExtractionExpr("api_key", `request.headers["X-API-KEY"]`), + log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`), }), }, }, @@ -2985,8 +3002,8 @@ func TestParse(t *testing.T) { Left: &LogRange{ Left: &PipelineExpr{ MultiStages: MultiStageExpr{ - 
newJSONExpressionParser([]log.JSONExpression{ - log.NewJSONExpr("layer7_something_specific", `layer7_something_specific`), + newJSONExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("layer7_something_specific", `layer7_something_specific`), }), }, Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, @@ -2996,6 +3013,91 @@ func TestParse(t *testing.T) { Operation: "count_over_time", }, }, + { + // binop always includes vector matching. Default is `without ()`, + // the zero value. + in: ` + sum(count_over_time({foo="bar"}[5m])) or vector(1) + `, + exp: mustNewBinOpExpr( + OpTypeOr, + &BinOpOptions{ + VectorMatching: &VectorMatching{Card: CardOneToOne}, + }, + mustNewVectorAggregationExpr(newRangeAggregationExpr( + &LogRange{ + Left: &MatchersExpr{ + Mts: []*labels.Matcher{ + mustNewMatcher(labels.MatchEqual, "foo", "bar"), + }, + }, + Interval: 5 * time.Minute, + }, OpRangeTypeCount, nil, nil), + "sum", + &Grouping{}, + nil, + ), + NewVectorExpr("1"), + ), + }, + { + in: `{app="foo"} | logfmt message="msg"`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), + MultiStages: MultiStageExpr{ + newLogfmtExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("message", `msg`), + }), + }, + }, + }, + { + in: `{app="foo"} | logfmt msg`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), + MultiStages: MultiStageExpr{ + newLogfmtExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("msg", `msg`), + }), + }, + }, + }, + { + in: `{app="foo"} | logfmt msg, err `, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), + MultiStages: MultiStageExpr{ + newLogfmtExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("msg", `msg`), + log.NewLabelExtractionExpr("err", `err`), + }), + }, + }, + }, + { + in: `{app="foo"} | logfmt msg, err="error"`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), + MultiStages: MultiStageExpr{ + newLogfmtExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("msg", `msg`), + log.NewLabelExtractionExpr("err", `error`), + }), + }, + }, + }, + { + in: `{app="foo"} | logfmt msg="message", apiKey="api_key"`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), + MultiStages: MultiStageExpr{ + newLogfmtExpressionParser([]log.LabelExtractionExpr{ + log.NewLabelExtractionExpr("msg", `message`), + log.NewLabelExtractionExpr("apiKey", `api_key`), + }), + }, + }, + }, } { t.Run(tc.in, func(t *testing.T) { ast, err := ParseExpr(tc.in) diff --git a/pkg/logql/syntax/prettier.go b/pkg/logql/syntax/prettier.go index 297cd25e8acb..3853bfa62218 100644 --- a/pkg/logql/syntax/prettier.go +++ b/pkg/logql/syntax/prettier.go @@ -139,6 +139,11 @@ func (e *JSONExpressionParser) Pretty(level int) string { return commonPrefixIndent(level, e) } +// e.g: | logfmt label="expression", another="expression" +func (e *LogfmtExpressionParser) Pretty(level int) string { + return commonPrefixIndent(level, e) +} + // e.g: sum_over_time({foo="bar"} | logfmt | unwrap bytes_processed [5m]) func (e *UnwrapExpr) Pretty(level int) string { s := indent(level) diff --git a/pkg/logql/syntax/walk_test.go b/pkg/logql/syntax/walk_test.go index 
78d990d22858..678e89df99c4 100644 --- a/pkg/logql/syntax/walk_test.go +++ b/pkg/logql/syntax/walk_test.go @@ -49,7 +49,7 @@ func Test_AppendMatchers(t *testing.T) { { desc: "vector range query", expr: `sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m]))`, - want: `sum by(cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m]))`, + want: `sum by (cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m]))`, matchers: []*labels.Matcher{ { Name: "namespace", @@ -61,7 +61,7 @@ func Test_AppendMatchers(t *testing.T) { { desc: "bin op query", expr: `sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m])) / sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m]))`, - want: `(sum by(cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m])) / sum by(cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m])))`, + want: `(sum by (cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m])) / sum by (cluster)(rate({job="foo", namespace="a"} |= "bar" | logfmt | bazz="buzz"[5m])))`, matchers: []*labels.Matcher{ { Name: "namespace", diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go index 4a9adcc0a271..0d272c95f22e 100644 --- a/pkg/loki/common/common.go +++ b/pkg/loki/common/common.go @@ -2,6 +2,7 @@ package common import ( "flag" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/netutil" @@ -66,19 +67,21 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { } type Storage struct { - S3 aws.S3Config `yaml:"s3"` - GCS gcp.GCSConfig `yaml:"gcs"` - Azure azure.BlobStorageConfig `yaml:"azure"` - BOS baidubce.BOSStorageConfig `yaml:"bos"` - Swift openstack.SwiftConfig `yaml:"swift"` - FSConfig FilesystemConfig `yaml:"filesystem"` - Hedging hedging.Config `yaml:"hedging"` + S3 aws.S3Config `yaml:"s3"` + GCS gcp.GCSConfig `yaml:"gcs"` + Azure azure.BlobStorageConfig `yaml:"azure"` + AlibabaCloud alibaba.OssConfig `yaml:"alibabacloud"` + BOS baidubce.BOSStorageConfig `yaml:"bos"` + Swift openstack.SwiftConfig `yaml:"swift"` + FSConfig FilesystemConfig `yaml:"filesystem"` + Hedging hedging.Config `yaml:"hedging"` } func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { s.S3.RegisterFlagsWithPrefix(prefix, f) s.GCS.RegisterFlagsWithPrefix(prefix, f) s.Azure.RegisterFlagsWithPrefix(prefix, f) + s.AlibabaCloud.RegisterFlagsWithPrefix(prefix, f) s.Swift.RegisterFlagsWithPrefix(prefix, f) s.BOS.RegisterFlagsWithPrefix(prefix, f) s.FSConfig.RegisterFlagsWithPrefix(prefix, f) diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index 1a482916ac0c..485c57c5301c 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -239,7 +239,7 @@ memberlist: assert.ErrorIs(t, err, ErrTooManyStorageConfigs) }) - t.Run("when common s3 storage config is provided, ruler and storage config are defaulted to use it", func(t *testing.T) { + t.Run("when common s3 storage config is provided (with empty session token), ruler and storage config are defaulted to use it", func(t *testing.T) { s3Config := `common: storage: s3: @@ -271,6 +271,66 @@ memberlist: assert.Equal(t, "us-east1", actual.Region) assert.Equal(t, "abc123", actual.AccessKeyID) assert.Equal(t, "def789", actual.SecretAccessKey.String()) + assert.Equal(t, "", actual.SessionToken.String()) + assert.Equal(t, true, actual.Insecure) + assert.Equal(t, false, actual.SSEEncryption) + 
assert.Equal(t, 5*time.Minute, actual.HTTPConfig.ResponseHeaderTimeout) + assert.Equal(t, false, actual.HTTPConfig.InsecureSkipVerify) + + assert.Equal(t, aws.SignatureVersionV4, actual.SignatureVersion, + "signature version should equal default value") + assert.Equal(t, 90*time.Second, actual.HTTPConfig.IdleConnTimeout, + "idle connection timeout should equal default value") + } + + // should remain empty + assert.EqualValues(t, defaults.Ruler.StoreConfig.Azure, config.Ruler.StoreConfig.Azure) + assert.EqualValues(t, defaults.Ruler.StoreConfig.GCS, config.Ruler.StoreConfig.GCS) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Swift, config.Ruler.StoreConfig.Swift) + assert.EqualValues(t, defaults.Ruler.StoreConfig.Local, config.Ruler.StoreConfig.Local) + assert.EqualValues(t, defaults.Ruler.StoreConfig.BOS, config.Ruler.StoreConfig.BOS) + // should remain empty + assert.EqualValues(t, defaults.StorageConfig.AzureStorageConfig, config.StorageConfig.AzureStorageConfig) + assert.EqualValues(t, defaults.StorageConfig.GCSConfig, config.StorageConfig.GCSConfig) + assert.EqualValues(t, defaults.StorageConfig.Swift, config.StorageConfig.Swift) + assert.EqualValues(t, defaults.StorageConfig.FSConfig, config.StorageConfig.FSConfig) + assert.EqualValues(t, defaults.StorageConfig.BOSStorageConfig, config.StorageConfig.BOSStorageConfig) + }) + + t.Run("when common s3 storage config is provided (with session token), ruler and storage config are defaulted to use it", func(t *testing.T) { + s3Config := `common: + storage: + s3: + s3: s3://foo-bucket/example + endpoint: s3://foo-bucket + region: us-east1 + access_key_id: abc123 + secret_access_key: def789 + session_token: 456abc + insecure: true + http_config: + response_header_timeout: 5m` + + config, defaults := testContext(s3Config, nil) + + expected, err := url.Parse("s3://foo-bucket/example") + require.NoError(t, err) + + assert.Equal(t, "s3", config.Ruler.StoreConfig.Type) + + for _, actual := range []aws.S3Config{ + config.Ruler.StoreConfig.S3, + config.StorageConfig.AWSStorageConfig.S3Config, + } { + require.NotNil(t, actual.S3.URL) + assert.Equal(t, *expected, *actual.S3.URL) + + assert.Equal(t, false, actual.S3ForcePathStyle) + assert.Equal(t, "s3://foo-bucket", actual.Endpoint) + assert.Equal(t, "us-east1", actual.Region) + assert.Equal(t, "abc123", actual.AccessKeyID) + assert.Equal(t, "def789", actual.SecretAccessKey.String()) + assert.Equal(t, "456abc", actual.SessionToken.String()) assert.Equal(t, true, actual.Insecure) assert.Equal(t, false, actual.SSEEncryption) assert.Equal(t, 5*time.Minute, actual.HTTPConfig.ResponseHeaderTimeout) diff --git a/pkg/push/go.mod b/pkg/push/go.mod index 903b5b276909..e99a33d3e7ad 100644 --- a/pkg/push/go.mod +++ b/pkg/push/go.mod @@ -13,9 +13,9 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect diff --git a/pkg/push/go.sum b/pkg/push/go.sum index 1039a424b183..c13553cd3727 100644 --- a/pkg/push/go.sum +++ b/pkg/push/go.sum @@ -1,3 +1,4 @@ +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -11,7 +12,11 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -32,20 +37,20 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -64,6 +69,7 @@ google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index ae72b2e645d3..c10b48932409 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -49,18 +49,44 @@ type Limits interface { type limits struct { Limits - splitDuration time.Duration + // Use pointers so nil value can indicate if the value was set. + splitDuration *time.Duration + maxQueryParallelism *int } func (l limits) QuerySplitDuration(user string) time.Duration { - return l.splitDuration + if l.splitDuration == nil { + return l.Limits.QuerySplitDuration(user) + } + return *l.splitDuration +} + +func (l limits) TSDBMaxQueryParallelism(user string) int { + if l.maxQueryParallelism == nil { + return l.Limits.TSDBMaxQueryParallelism(user) + } + return *l.maxQueryParallelism +} + +func (l limits) MaxQueryParallelism(user string) int { + if l.maxQueryParallelism == nil { + return l.Limits.MaxQueryParallelism(user) + } + return *l.maxQueryParallelism } // WithSplitByLimits will construct a Limits with a static split by duration. func WithSplitByLimits(l Limits, splitBy time.Duration) Limits { return limits{ Limits: l, - splitDuration: splitBy, + splitDuration: &splitBy, + } +} + +func WithMaxParallelism(l Limits, maxParallelism int) Limits { + return limits{ + Limits: l, + maxQueryParallelism: &maxParallelism, } } diff --git a/pkg/querier/queryrange/queryrangebase/promql_test.go b/pkg/querier/queryrange/queryrangebase/promql_test.go index 7e61b8674c38..eeeb92d923d4 100644 --- a/pkg/querier/queryrange/queryrangebase/promql_test.go +++ b/pkg/querier/queryrange/queryrangebase/promql_test.go @@ -662,12 +662,13 @@ func factor(f float64) func(float64) float64 { // 2 series. 
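The limits wrapper changes above switch the override fields to pointers so that nil means "not set": QuerySplitDuration, MaxQueryParallelism and TSDBMaxQueryParallelism return the wrapper's value only when the corresponding pointer is non-nil and otherwise defer to the embedded Limits. A minimal sketch of the resulting behaviour, written as it could appear in a test in this package and reusing the existing fakeLimits stub from querysharding_test.go:

    package queryrange

    import "fmt"

    func ExampleWithMaxParallelism() {
        // Only the parallelism override is set; every other limit still
        // comes from the wrapped Limits implementation.
        l := WithMaxParallelism(fakeLimits{maxQueryParallelism: 8}, 2)

        fmt.Println(l.MaxQueryParallelism("tenant")) // 2: the override pointer is non-nil
        fmt.Println(l.QuerySplitDuration("tenant"))  // splitDuration was never set, so this falls through to fakeLimits
    }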
func splitByShard(shardIndex, shardTotal int, testMatrices *testMatrix) *testMatrix { res := &testMatrix{} + var it chunkenc.Iterator for i, s := range testMatrices.series { if i%shardTotal != shardIndex { continue } var points []promql.Point - it := s.Iterator() + it = s.Iterator(it) for it.Next() != chunkenc.ValNone { t, v := it.At() points = append(points, promql.Point{ diff --git a/pkg/querier/queryrange/queryrangebase/series_test.go b/pkg/querier/queryrange/queryrangebase/series_test.go index ed3a017af9f7..0bfdfaefcced 100644 --- a/pkg/querier/queryrange/queryrangebase/series_test.go +++ b/pkg/querier/queryrange/queryrangebase/series_test.go @@ -57,8 +57,9 @@ func Test_ResponseToSamples(t *testing.T) { setCt := 0 + var iter chunkenc.Iterator for set.Next() { - iter := set.At().Iterator() + iter = set.At().Iterator(iter) require.Nil(t, set.Err()) sampleCt := 0 diff --git a/pkg/querier/queryrange/queryrangebase/test_utils_test.go b/pkg/querier/queryrange/queryrangebase/test_utils_test.go index 6ca297f037e3..14c23fd24408 100644 --- a/pkg/querier/queryrange/queryrangebase/test_utils_test.go +++ b/pkg/querier/queryrange/queryrangebase/test_utils_test.go @@ -107,6 +107,7 @@ func TestNewMockShardedqueryable(t *testing.T) { expectedSeries := int(math.Pow(float64(tc.labelBuckets), float64(len(tc.labelSet)))) seriesCt := 0 + var iter chunkenc.Iterator for i := 0; i < tc.shards; i++ { set := q.Select(false, nil, &labels.Matcher{ @@ -122,7 +123,7 @@ func TestNewMockShardedqueryable(t *testing.T) { for set.Next() { seriesCt++ - iter := set.At().Iterator() + iter = set.At().Iterator(iter) samples := 0 for iter.Next() != chunkenc.ValNone { samples++ diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index 02203c901428..83b5b12a7dbb 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -8,13 +8,12 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/tenant" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/promql/parser" "github.com/weaveworks/common/httpgrpc" - "github.com/grafana/dskit/tenant" - "github.com/grafana/loki/pkg/loghttp" "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/logqlmodel" @@ -24,6 +23,7 @@ import ( "github.com/grafana/loki/pkg/util" util_log "github.com/grafana/loki/pkg/util/log" "github.com/grafana/loki/pkg/util/marshal" + "github.com/grafana/loki/pkg/util/spanlogger" "github.com/grafana/loki/pkg/util/validation" ) @@ -36,6 +36,7 @@ func NewQueryShardMiddleware( middlewareMetrics *queryrangebase.InstrumentMiddlewareMetrics, shardingMetrics *logql.MapperMetrics, limits Limits, + maxShards int, ) queryrangebase.Middleware { noshards := !hasShards(confs) @@ -49,7 +50,7 @@ func NewQueryShardMiddleware( } mapperware := queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { - return newASTMapperware(confs, next, logger, shardingMetrics, limits) + return newASTMapperware(confs, next, logger, shardingMetrics, limits, maxShards) }) return queryrangebase.MiddlewareFunc(func(next queryrangebase.Handler) queryrangebase.Handler { @@ -71,28 +72,35 @@ func newASTMapperware( logger log.Logger, metrics *logql.MapperMetrics, limits Limits, + maxShards int, ) *astMapperware { return &astMapperware{ - confs: confs, - logger: log.With(logger, "middleware", "QueryShard.astMapperware"), - limits: limits, - next: next, - ng: 
logql.NewDownstreamEngine(logql.EngineOpts{LogExecutingQuery: false}, DownstreamHandler{next: next, limits: limits}, limits, logger), - metrics: metrics, + confs: confs, + logger: log.With(logger, "middleware", "QueryShard.astMapperware"), + limits: limits, + next: next, + ng: logql.NewDownstreamEngine(logql.EngineOpts{LogExecutingQuery: false}, DownstreamHandler{next: next, limits: limits}, limits, logger), + metrics: metrics, + maxShards: maxShards, } } type astMapperware struct { - confs ShardingConfigs - logger log.Logger - limits Limits - next queryrangebase.Handler - ng *logql.DownstreamEngine - metrics *logql.MapperMetrics + confs ShardingConfigs + logger log.Logger + limits Limits + next queryrangebase.Handler + ng *logql.DownstreamEngine + metrics *logql.MapperMetrics + maxShards int } func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { - logger := util_log.WithContext(ctx, ast.logger) + logger := spanlogger.FromContextWithFallback( + ctx, + util_log.WithContext(ctx, ast.logger), + ) + maxRVDuration, maxOffset, err := maxRangeVectorAndOffsetDuration(r.GetQuery()) if err != nil { level.Warn(logger).Log("err", err.Error(), "msg", "failed to get range-vector and offset duration so skipped AST mapper for request") @@ -117,6 +125,7 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que ast.ng.Opts().MaxLookBackPeriod, ast.logger, MinWeightedParallelism(ctx, tenants, ast.confs, ast.limits, model.Time(r.GetStart()), model.Time(r.GetEnd())), + ast.maxShards, r, ast.next, ) @@ -125,9 +134,6 @@ func (ast *astMapperware) Do(ctx context.Context, r queryrangebase.Request) (que } mapper := logql.NewShardMapper(resolver, ast.metrics) - if err != nil { - return nil, err - } noop, parsed, err := mapper.Parse(r.GetQuery()) if err != nil { diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index 4332571b4e3b..dec954e38619 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -167,6 +167,7 @@ func Test_astMapper(t *testing.T) { log.NewNopLogger(), nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second}, + 0, ) resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`{food="bar"}`)) @@ -200,6 +201,7 @@ func Test_ShardingByPass(t *testing.T) { log.NewNopLogger(), nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1}, + 0, ) _, err := mware.Do(user.InjectOrgID(context.Background(), "1"), defaultReq().WithQuery(`1+1`)) @@ -272,7 +274,8 @@ func Test_InstantSharding(t *testing.T) { maxSeries: math.MaxInt32, maxQueryParallelism: 10, queryTimeout: time.Second, - }) + }, + 0) response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { lock.Lock() defer lock.Unlock() @@ -553,6 +556,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { log.NewNopLogger(), nilShardingMetrics, fakeLimits{maxSeries: math.MaxInt32, maxQueryParallelism: 1, queryTimeout: time.Second}, + 0, ) resp, err := mware.Do(user.InjectOrgID(context.Background(), "1"), tc.req) diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 506e249c5032..564e8d284072 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -76,6 +76,11 @@ func NewTripperware( return nil, nil, err } + 
limitedTripperware, err := NewLimitedTripperware(cfg, log, limits, schema, LokiCodec, c, metrics) + if err != nil { + return nil, nil, err + } + // NOTE: When we would start caching response from non-metric queries we would have to consider cache gen headers as well in // MergeResponse implementation for Loki codecs same as it is done in Cortex at https://github.com/cortexproject/cortex/blob/21bad57b346c730d684d6d0205efef133422ab28/pkg/querier/queryrange/query_range.go#L170 logFilterTripperware, err := NewLogFilterTripperware(cfg, log, limits, schema, LokiCodec, c, metrics) @@ -99,26 +104,28 @@ func NewTripperware( } return func(next http.RoundTripper) http.RoundTripper { metricRT := metricsTripperware(next) + limitedRT := limitedTripperware(next) logFilterRT := logFilterTripperware(next) seriesRT := seriesTripperware(next) labelsRT := labelsTripperware(next) instantRT := instantMetricTripperware(next) - return newRoundTripper(log, next, logFilterRT, metricRT, seriesRT, labelsRT, instantRT, limits) + return newRoundTripper(log, next, limitedRT, logFilterRT, metricRT, seriesRT, labelsRT, instantRT, limits) }, c, nil } type roundTripper struct { logger log.Logger - next, log, metric, series, labels, instantMetric http.RoundTripper + next, limited, log, metric, series, labels, instantMetric http.RoundTripper limits Limits } // newRoundTripper creates a new queryrange roundtripper -func newRoundTripper(logger log.Logger, next, log, metric, series, labels, instantMetric http.RoundTripper, limits Limits) roundTripper { +func newRoundTripper(logger log.Logger, next, limited, log, metric, series, labels, instantMetric http.RoundTripper, limits Limits) roundTripper { return roundTripper{ logger: logger, + limited: limited, log: log, limits: limits, metric: metric, @@ -154,13 +161,17 @@ func (r roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return r.metric.RoundTrip(req) case syntax.LogSelectorExpr: // Note, this function can mutate the request - _, err := transformRegexQuery(req, e) + expr, err := transformRegexQuery(req, e) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } if err := validateLimits(req, rangeQuery.Limit, r.limits); err != nil { return nil, err } + // Only filter expressions are query sharded + if !expr.HasFilter() { + return r.limited.RoundTrip(req) + } return r.log.RoundTrip(req) default: @@ -310,6 +321,77 @@ func NewLogFilterTripperware( metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, + 0, // 0 is unlimited shards + ), + ) + } + + if cfg.MaxRetries > 0 { + queryRangeMiddleware = append( + queryRangeMiddleware, queryrangebase.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + queryrangebase.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics), + ) + } + + return func(next http.RoundTripper) http.RoundTripper { + if len(queryRangeMiddleware) > 0 { + return NewLimitedRoundTripper(next, codec, limits, schema.Configs, queryRangeMiddleware...) + } + return next + }, nil +} + +// NewLimitedTripperware creates a new frontend tripperware responsible for handling log requests which are label matcher only, no filter expression. 
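NewLimitedTripperware and the roundTripper changes above split log queries into two paths: selectors with a line filter keep going through the sharded log path, while plain label-matcher queries are sent to the cheaper "limited" path. A rough sketch of that dispatch, with a crude substring check standing in for the real AST test (expr.HasFilter()):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// routingTripper is a toy version of the queryrange roundTripper: queries with
// a line-filter stage go to the sharded log-filter path, plain label-matcher
// selectors go to the cheaper "limited" path.
type routingTripper struct {
	limited, logFilter http.RoundTripper
}

func (r routingTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	q := req.URL.Query().Get("query")
	// The real code parses the query and checks expr.HasFilter(); substring
	// matching on |= and |~ is only good enough for this illustration.
	if strings.Contains(q, "|=") || strings.Contains(q, "|~") {
		return r.logFilter.RoundTrip(req)
	}
	return r.limited.RoundTrip(req)
}

// namedTripper just reports which path handled the request.
type namedTripper string

func (n namedTripper) RoundTrip(*http.Request) (*http.Response, error) {
	fmt.Println("handled by", string(n))
	return &http.Response{StatusCode: http.StatusOK}, nil
}

func main() {
	rt := routingTripper{limited: namedTripper("limited"), logFilter: namedTripper("log-filter")}
	u := &url.URL{
		Path:     "/loki/api/v1/query_range",
		RawQuery: url.Values{"query": {`{app="foo"}`}}.Encode(),
	}
	rt.RoundTrip(&http.Request{URL: u}) // handled by limited
}

In the real round tripper the decision is made after parsing the query, so parser stages and regex filters are handled correctly; the sketch only shows the routing shape.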
+func NewLimitedTripperware( + cfg Config, + log log.Logger, + limits Limits, + schema config.SchemaConfig, + codec queryrangebase.Codec, + c cache.Cache, + metrics *Metrics, +) (queryrangebase.Tripperware, error) { + queryRangeMiddleware := []queryrangebase.Middleware{ + StatsCollectorMiddleware(), + NewLimitsMiddleware(limits), + queryrangebase.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), + // Limited queries only need to fetch up to the requested line limit worth of logs, + // Our defaults for splitting and parallelism are much too aggressive for large customers and result in + // potentially GB of logs being returned by all the shards and splits which will overwhelm the frontend + // Therefore we force max parallelism to one so that these queries are executed sequentially. + // Below we also fix the number of shards to a static number. + SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, 1), codec, splitByTime, metrics.SplitByMetrics), + } + + if cfg.CacheResults { + queryCacheMiddleware := NewLogResultCache( + log, + limits, + c, + func(r queryrangebase.Request) bool { + return !r.GetCachingOptions().Disabled + }, + cfg.Transformer, + metrics.LogResultCacheMetrics, + ) + queryRangeMiddleware = append( + queryRangeMiddleware, + queryrangebase.InstrumentMiddleware("log_results_cache", metrics.InstrumentMiddlewareMetrics), + queryCacheMiddleware, + ) + } + + if cfg.ShardedQueries { + queryRangeMiddleware = append(queryRangeMiddleware, + NewQueryShardMiddleware( + log, + schema.Configs, + metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware + metrics.MiddlewareMapperMetrics.shardMapper, + limits, + // Too many shards on limited queries results in slowing down this type of query + // and overwhelming the frontend, therefore we fix the number of shards to prevent this. 
+ 32, ), ) } @@ -483,6 +565,7 @@ func NewMetricTripperware( metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, + 0, // 0 is unlimited shards ), ) } @@ -530,6 +613,7 @@ func NewInstantMetricTripperware( metrics.InstrumentMiddlewareMetrics, // instrumentation is included in the sharding middleware metrics.MiddlewareMapperMetrics.shardMapper, limits, + 0, // 0 is unlimited shards ), ) } diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index bd202cce6908..fda0e0956702 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -442,6 +442,10 @@ func TestPostQueries(t *testing.T) { t.Error("unexpected default roundtripper called") return nil, nil }), + queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { + t.Error("unexpected default roundtripper called") + return nil, nil + }), queryrangebase.RoundTripFunc(func(*http.Request) (*http.Response, error) { return nil, nil }), diff --git a/pkg/querier/queryrange/shard_resolver.go b/pkg/querier/queryrange/shard_resolver.go index 245d1537329a..5f196550a523 100644 --- a/pkg/querier/queryrange/shard_resolver.go +++ b/pkg/querier/queryrange/shard_resolver.go @@ -28,6 +28,7 @@ func shardResolverForConf( defaultLookback time.Duration, logger log.Logger, maxParallelism int, + maxShards int, r queryrangebase.Request, handler queryrangebase.Handler, ) (logql.ShardResolver, bool) { @@ -39,6 +40,7 @@ func shardResolverForConf( from: model.Time(r.GetStart()), through: model.Time(r.GetEnd()), maxParallelism: maxParallelism, + maxShards: maxShards, defaultLookback: defaultLookback, }, true } @@ -55,6 +57,7 @@ type dynamicShardResolver struct { from, through model.Time maxParallelism int + maxShards int defaultLookback time.Duration } @@ -121,7 +124,8 @@ func (r *dynamicShardResolver) Shards(e syntax.Expr) (int, error) { } combined := stats.MergeStats(results...) - factor := guessShardFactor(combined) + factor := guessShardFactor(combined, r.maxShards) + var bytesPerShard = combined.Bytes if factor > 0 { bytesPerShard = combined.Bytes / uint64(factor) @@ -151,7 +155,7 @@ const ( // is at least two, the range of data per shard is (maxBytesPerShard/2, maxBytesPerShard] // For instance, for a maxBytesPerShard of 500MB and a query touching 1000MB, we split into two shards of 500MB. // If there are 1004MB, we split into four shards of 251MB. 
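The comment above gives the sizing rule for guessShardFactor; the sketch below works the same arithmetic, including the new maxShards cap (maxBytesPerShard is hard-coded to 500MB purely for the example, and the helper name is illustrative):

package main

import (
	"fmt"
	"math"
)

// guess mirrors the shape of guessShardFactor: find the minimum shard count
// that keeps each shard under maxBytesPerShard, round up to the next power of
// two, then cap at maxShards (0 means uncapped). A factor of 1 is reported as
// 0 because sharding by one is the same as not sharding.
func guess(bytes, maxBytesPerShard uint64, maxShards int) int {
	minShards := float64(bytes) / float64(maxBytesPerShard)
	factor := int(math.Pow(2, math.Ceil(math.Log2(minShards))))
	if maxShards > 0 && factor > maxShards {
		factor = maxShards
	}
	if factor <= 1 {
		return 0
	}
	return factor
}

func main() {
	const maxBytesPerShard = 500 << 20
	fmt.Println(guess(1000<<20, maxBytesPerShard, 0)) // 2: two shards of 500MB
	fmt.Println(guess(1004<<20, maxBytesPerShard, 0)) // 4: 2.008 rounds up to the next power of two
	fmt.Println(guess(2000<<20, maxBytesPerShard, 2)) // 2: capped by maxShards, matching the new test case
}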
-func guessShardFactor(stats stats.Stats) int { +func guessShardFactor(stats stats.Stats, maxShards int) int { minShards := float64(stats.Bytes) / float64(maxBytesPerShard) // round up to nearest power of 2 @@ -160,8 +164,21 @@ func guessShardFactor(stats stats.Stats) int { // Since x^0 == 1 and we only support factors of 2 // reset this edge case manually factor := int(math.Pow(2, power)) + if maxShards > 0 { + factor = min(factor, maxShards) + } + + // shortcut: no need to run any sharding logic when factor=1 + // as it's the same as no sharding if factor == 1 { factor = 0 } return factor } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/pkg/querier/queryrange/shard_resolver_test.go b/pkg/querier/queryrange/shard_resolver_test.go index 6660ec264ab0..92fea236d58a 100644 --- a/pkg/querier/queryrange/shard_resolver_test.go +++ b/pkg/querier/queryrange/shard_resolver_test.go @@ -11,8 +11,9 @@ import ( func TestGuessShardFactor(t *testing.T) { for _, tc := range []struct { - stats stats.Stats - exp int + stats stats.Stats + maxShards int + exp int }{ { // no data == no sharding @@ -43,9 +44,30 @@ func TestGuessShardFactor(t *testing.T) { Bytes: maxBytesPerShard, }, }, + { + maxShards: 8, + exp: 4, + stats: stats.Stats{ + Bytes: maxBytesPerShard * 4, + }, + }, + { + maxShards: 2, + exp: 2, + stats: stats.Stats{ + Bytes: maxBytesPerShard * 4, + }, + }, + { + maxShards: 1, + exp: 0, + stats: stats.Stats{ + Bytes: maxBytesPerShard * 4, + }, + }, } { t.Run(fmt.Sprintf("%+v", tc.stats), func(t *testing.T) { - require.Equal(t, tc.exp, guessShardFactor(tc.stats)) + require.Equal(t, tc.exp, guessShardFactor(tc.stats, tc.maxShards)) }) } } diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go index 948e50e27cdb..f92428afa024 100644 --- a/pkg/querier/queryrange/split_by_range_test.go +++ b/pkg/querier/queryrange/split_by_range_test.go @@ -52,9 +52,9 @@ func Test_RangeVectorSplit(t *testing.T) { Path: "/loki/api/v1/query", }, subQueries: []queryrangebase.RequestResponse{ - subQueryRequestResponse(`sum by(bar)(bytes_over_time({app="foo"}[1m]))`, 10), - subQueryRequestResponse(`sum by(bar)(bytes_over_time({app="foo"}[1m] offset 1m0s))`, 20), - subQueryRequestResponse(`sum by(bar)(bytes_over_time({app="foo"}[1m] offset 2m0s))`, 30), + subQueryRequestResponse(`sum by (bar)(bytes_over_time({app="foo"}[1m]))`, 10), + subQueryRequestResponse(`sum by (bar)(bytes_over_time({app="foo"}[1m] offset 1m0s))`, 20), + subQueryRequestResponse(`sum by (bar)(bytes_over_time({app="foo"}[1m] offset 2m0s))`, 30), }, expected: expectedMergedResponse(10 + 20 + 30), }, @@ -78,9 +78,9 @@ func Test_RangeVectorSplit(t *testing.T) { Path: "/loki/api/v1/query", }, subQueries: []queryrangebase.RequestResponse{ - subQueryRequestResponse(`sum by(bar)(count_over_time({app="foo"}[1m]))`, 0), - subQueryRequestResponse(`sum by(bar)(count_over_time({app="foo"}[1m] offset 1m0s))`, 0), - subQueryRequestResponse(`sum by(bar)(count_over_time({app="foo"}[1m] offset 2m0s))`, 0), + subQueryRequestResponse(`sum by (bar)(count_over_time({app="foo"}[1m]))`, 0), + subQueryRequestResponse(`sum by (bar)(count_over_time({app="foo"}[1m] offset 1m0s))`, 0), + subQueryRequestResponse(`sum by (bar)(count_over_time({app="foo"}[1m] offset 2m0s))`, 0), }, expected: expectedMergedResponse(0 + 0 + 0), }, @@ -104,9 +104,9 @@ func Test_RangeVectorSplit(t *testing.T) { Path: "/loki/api/v1/query", }, subQueries: []queryrangebase.RequestResponse{ - subQueryRequestResponse(`sum 
by(bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1), - subQueryRequestResponse(`sum by(bar)(sum_over_time({app="foo"} | unwrap bar[1m] offset 1m0s))`, 2), - subQueryRequestResponse(`sum by(bar)(sum_over_time({app="foo"} | unwrap bar[1m] offset 2m0s))`, 3), + subQueryRequestResponse(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m]))`, 1), + subQueryRequestResponse(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m] offset 1m0s))`, 2), + subQueryRequestResponse(`sum by (bar)(sum_over_time({app="foo"} | unwrap bar[1m] offset 2m0s))`, 3), }, expected: expectedMergedResponse(1 + 2 + 3), }, diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 604612e8aee8..99e26899a1f8 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -85,7 +85,7 @@ func (c *ConcreteSeries) Labels() labels.Labels { } // Iterator implements storage.Series -func (c *ConcreteSeries) Iterator() chunkenc.Iterator { +func (c *ConcreteSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { return NewConcreteSeriesIterator(c) } @@ -246,8 +246,8 @@ func (d DeletedSeries) Labels() labels.Labels { return d.series.Labels() } -func (d DeletedSeries) Iterator() chunkenc.Iterator { - return NewDeletedSeriesIterator(d.series.Iterator(), d.deletedIntervals) +func (d DeletedSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { + return NewDeletedSeriesIterator(d.series.Iterator(nil), d.deletedIntervals) } type DeletedSeriesIterator struct { @@ -349,7 +349,7 @@ func (e emptySeries) Labels() labels.Labels { return e.labels } -func (emptySeries) Iterator() chunkenc.Iterator { +func (emptySeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { return NewEmptySeriesIterator() } diff --git a/pkg/ruler/base/compat.go b/pkg/ruler/base/compat.go index b91afc8784ec..caaa2b4e8971 100644 --- a/pkg/ruler/base/compat.go +++ b/pkg/ruler/base/compat.go @@ -75,7 +75,7 @@ func (a *PusherAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ return 0, errors.New("updating metadata is unsupported") } -func (a *PusherAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram) (storage.SeriesRef, error) { +func (a *PusherAppender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, errors.New("native histograms are unsupported") } diff --git a/pkg/ruler/base/ruler.go b/pkg/ruler/base/ruler.go index 6f85b3526ff6..880089421176 100644 --- a/pkg/ruler/base/ruler.go +++ b/pkg/ruler/base/ruler.go @@ -677,7 +677,7 @@ func filterRules(shardingAlgo string, userID string, ruleGroups []*rulespb.RuleG var result = make([]*rulespb.RuleGroupDesc, 0, len(ruleGroups)) for _, g := range ruleGroups { - logger = log.With(logger, "user", userID, "namespace", g.Namespace, "group", g.Name) + glog := log.With(logger, "user", userID, "namespace", g.Namespace, "group", g.Name) switch shardingAlgo { @@ -686,15 +686,15 @@ func filterRules(shardingAlgo string, userID string, ruleGroups []*rulespb.RuleG owned, err := instanceOwnsRuleGroup(ring, g, instanceAddr) if err != nil { ringCheckErrors.Inc() - level.Error(logger).Log("msg", "failed to check if the ruler replica owns the rule group", "err", err) + level.Error(glog).Log("msg", "failed to check if the ruler replica owns the rule group", "err", err) continue } if owned { - level.Debug(logger).Log("msg", "rule group owned") + level.Debug(glog).Log("msg", "rule group owned") result = append(result, g) 
} else { - level.Debug(logger).Log("msg", "rule group not owned, ignoring") + level.Debug(glog).Log("msg", "rule group not owned, ignoring") } continue @@ -702,7 +702,7 @@ func filterRules(shardingAlgo string, userID string, ruleGroups []*rulespb.RuleG // if we are sharding by rule, we need to create rule groups for each rule to comply with Prometheus' rule engine's expectations case util.ShardingAlgoByRule: for _, r := range g.Rules { - rlog := log.With(logger, "rule", getRuleIdentifier(r)) + rlog := log.With(glog, "rule", getRuleIdentifier(r)) owned, err := instanceOwnsRule(ring, g, r, instanceAddr) if err != nil { diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go index 7e9f3cf28b50..5ccb19fce630 100644 --- a/pkg/ruler/base/storage.go +++ b/pkg/ruler/base/storage.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -33,12 +34,13 @@ type RuleStoreConfig struct { Type string `yaml:"type"` // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure" doc:"description=Configures backend rule storage for Azure."` - GCS gcp.GCSConfig `yaml:"gcs" doc:"description=Configures backend rule storage for GCS."` - S3 aws.S3Config `yaml:"s3" doc:"description=Configures backend rule storage for S3."` - BOS baidubce.BOSStorageConfig `yaml:"bos" doc:"description=Configures backend rule storage for Baidu Object Storage (BOS)."` - Swift openstack.SwiftConfig `yaml:"swift" doc:"description=Configures backend rule storage for Swift."` - Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."` + Azure azure.BlobStorageConfig `yaml:"azure" doc:"description=Configures backend rule storage for Azure."` + AlibabaCloud alibaba.OssConfig `yaml:"alibabacloud" doc:"description=Configures backend rule storage for AlibabaCloud Object Storage (OSS)."` + GCS gcp.GCSConfig `yaml:"gcs" doc:"description=Configures backend rule storage for GCS."` + S3 aws.S3Config `yaml:"s3" doc:"description=Configures backend rule storage for S3."` + BOS baidubce.BOSStorageConfig `yaml:"bos" doc:"description=Configures backend rule storage for Baidu Object Storage (BOS)."` + Swift openstack.SwiftConfig `yaml:"swift" doc:"description=Configures backend rule storage for Swift."` + Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."` mock rulestore.RuleStore `yaml:"-"` } @@ -46,6 +48,7 @@ type RuleStoreConfig struct { // RegisterFlags registers flags. 
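Back in filterRules, the diff stops reassigning the shared logger inside the loop: each rule group now gets its own glog (and each rule an rlog derived from it), so fields from earlier iterations no longer pile up on later log lines. A stripped-down illustration of why that matters, with a tiny with helper standing in for go-kit's log.With:

package main

import "fmt"

// with mimics log.With: it returns a new context with extra fields appended,
// leaving the original slice untouched.
func with(ctx []string, kv ...string) []string {
	out := append([]string(nil), ctx...)
	return append(out, kv...)
}

func main() {
	base := []string{"user", "tenant-1"}
	groups := []string{"grp-a", "grp-b"}

	// Old shape: reassigning the outer variable accumulates one "group" field
	// per iteration, so grp-b's lines also carry grp-a's field.
	logger := base
	for _, g := range groups {
		logger = with(logger, "group", g)
	}
	fmt.Println(logger) // [user tenant-1 group grp-a group grp-b]

	// New shape: a per-iteration glog keeps each group's context isolated.
	for _, g := range groups {
		glog := with(base, "group", g)
		fmt.Println(glog) // [user tenant-1 group grp-a], then [user tenant-1 group grp-b]
	}
}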
func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.Azure.RegisterFlagsWithPrefix("ruler.storage.", f) + cfg.AlibabaCloud.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index ce91ddbfcf79..f5fa240c55ab 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -1,14 +1,18 @@ package ruler import ( - "bytes" "context" "fmt" - "os" "strings" "time" "github.com/go-kit/log" + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql" + "github.com/grafana/loki/pkg/logql/syntax" + ruler "github.com/grafana/loki/pkg/ruler/base" + "github.com/grafana/loki/pkg/ruler/rulespb" + "github.com/grafana/loki/pkg/ruler/util" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -23,14 +27,6 @@ import ( "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/template" "github.com/weaveworks/common/user" - "gopkg.in/yaml.v3" - - "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/logql" - "github.com/grafana/loki/pkg/logql/syntax" - ruler "github.com/grafana/loki/pkg/ruler/base" - "github.com/grafana/loki/pkg/ruler/rulespb" - "github.com/grafana/loki/pkg/ruler/util" ) // RulesLimits is the one function we need from limits.Overrides, and @@ -150,6 +146,10 @@ func MultiTenantRuleManager(cfg Config, engine *logql.Engine, overrides RulesLim queryFunc := engineQueryFunc(engine, overrides, registry, userID) memStore := NewMemStore(userID, queryFunc, newMemstoreMetrics(reg), 5*time.Minute, log.With(logger, "subcomponent", "MemStore")) + // GroupLoader builds a cache of the rules as they're loaded by the + // manager.This is used to back the memstore + groupLoader := NewCachingGroupLoader(GroupLoader{}) + mgr := rules.NewManager(&rules.ManagerOptions{ Appendable: registry, Queryable: memStore, @@ -162,57 +162,51 @@ func MultiTenantRuleManager(cfg Config, engine *logql.Engine, overrides RulesLim OutageTolerance: cfg.OutageTolerance, ForGracePeriod: cfg.ForGracePeriod, ResendDelay: cfg.ResendDelay, - GroupLoader: GroupLoader{}, + GroupLoader: groupLoader, }) - // initialize memStore, bound to the manager's alerting rules - memStore.Start(mgr) + cachingManager := &CachingRulesManager{ + manager: mgr, + groupLoader: groupLoader, + } + + memStore.Start(groupLoader) - return mgr + return cachingManager } } -type GroupLoader struct{} +// CachingRulesManager holds a CachingGroupLoader to make sure the GroupLoader +// has consistent state after update operations. Manager needs to hold the same +// caching grouploader +type CachingRulesManager struct { + manager ruler.RulesManager + groupLoader *CachingGroupLoader +} -func (GroupLoader) Parse(query string) (parser.Expr, error) { - expr, err := syntax.ParseExpr(query) +// Update reconciles the state of the CachingGroupLoader after a manager.Update. +// The GroupLoader is mutated as part of a call to Update but it might still +// contain removed files. 
Update tells the loader which files to keep +func (m *CachingRulesManager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, ruleGroupPostProcessFunc rules.RuleGroupPostProcessFunc) error { + err := m.manager.Update(interval, files, externalLabels, externalURL, ruleGroupPostProcessFunc) if err != nil { - return nil, err + return err } - return exprAdapter{expr}, nil + m.groupLoader.Prune(files) + return nil } -func (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { - b, err := os.ReadFile(identifier) - if err != nil { - return nil, []error{errors.Wrap(err, identifier)} - } - rgs, errs := g.parseRules(b) - for i := range errs { - errs[i] = errors.Wrap(errs[i], identifier) - } - return rgs, errs +func (m *CachingRulesManager) Run() { + m.manager.Run() } -func (GroupLoader) parseRules(content []byte) (*rulefmt.RuleGroups, []error) { - var ( - groups rulefmt.RuleGroups - errs []error - ) - - decoder := yaml.NewDecoder(bytes.NewReader(content)) - decoder.KnownFields(true) - - if err := decoder.Decode(&groups); err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return nil, errs - } +func (m *CachingRulesManager) Stop() { + m.manager.Stop() +} - return &groups, ValidateGroups(groups.Groups...) +func (m *CachingRulesManager) RuleGroups() []*rules.Group { + return m.manager.RuleGroups() } func ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) { diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index b3e36cf3959d..e22b62397d5b 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -2,9 +2,6 @@ package ruler import ( "context" - "fmt" - "os" - "strings" "testing" "time" @@ -18,262 +15,6 @@ import ( "github.com/grafana/loki/pkg/validation" ) -func Test_Load(t *testing.T) { - for _, tc := range []struct { - desc string - data string - match string - }{ - { - desc: "load correctly", - data: ` -groups: - - name: testgrp2 - interval: 0s - rules: - - alert: HTTPCredentialsLeaked - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail empty groupname", - match: "Groupname must not be empty", - data: ` -groups: - - name: - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail duplicate grps", - match: "repeated in the same file", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - summary: High request latency - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail record & alert", - match: "only one of 'record' and 'alert' must be set", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - record: doublevision - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - 
annotations: - summary: High request latency -`, - }, - { - desc: "fail neither record nor alert", - match: "one of 'record' or 'alert' must be set", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail empty expr", - match: "field 'expr' must be set in rule", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail bad expr", - match: "could not parse expression", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: garbage - for: 2m - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail annotations in recording rule", - match: "invalid field 'annotations' in recording rule", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - record: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - labels: - severity: page - annotations: - summary: High request latency -`, - }, - { - desc: "fail for in recording rule", - match: "invalid field 'for' in recording rule", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - record: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page -`, - }, - { - desc: "fail recording rule name", - match: "invalid recording rule name:", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - record: 'Hi.ghThroughputLogStreams' - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) -`, - }, - { - desc: "fail invalid label name", - match: "invalid label name:", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - 'se.verity': page - annotations: - summary: High request latency -`, - }, - { - desc: "fail invalid annotation", - match: "invalid annotation name:", - data: ` -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - 's.ummary': High request latency -`, - }, - { - desc: "unknown fields", - match: "field unknown not found", - data: ` -unknown: true -groups: - - name: grp1 - interval: 0s - rules: - - alert: HighThroughputLogStreams - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) - for: 2m - labels: - severity: page - annotations: - 's.ummary': High request latency -`, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - var loader GroupLoader - f, err := os.CreateTemp(os.TempDir(), "rules") - require.Nil(t, err) - defer os.Remove(f.Name()) - err = os.WriteFile(f.Name(), []byte(tc.data), 0777) - require.Nil(t, err) - - _, errs := loader.Load(f.Name()) - if tc.match != "" { - require.NotNil(t, errs) - var found bool - for _, err := range errs { - found = found || strings.Contains(err.Error(), tc.match) - } - if !found { - fmt.Printf("\nerrors did not contain desired (%s): 
%v", tc.match, errs) - } - require.Equal(t, true, found) - } else { - require.Nil(t, errs) - } - }) - - } -} - // TestInvalidRemoteWriteConfig tests that a validation error is raised when config is invalid func TestInvalidRemoteWriteConfig(t *testing.T) { // if remote-write is not enabled, validation fails diff --git a/pkg/ruler/grouploader.go b/pkg/ruler/grouploader.go new file mode 100644 index 000000000000..643e4d3f26a3 --- /dev/null +++ b/pkg/ruler/grouploader.go @@ -0,0 +1,126 @@ +package ruler + +import ( + "bytes" + "github.com/grafana/loki/pkg/logql/syntax" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/rules" + "gopkg.in/yaml.v3" + "os" + "sync" +) + +type GroupLoader struct{} + +func (GroupLoader) Parse(query string) (parser.Expr, error) { + expr, err := syntax.ParseExpr(query) + if err != nil { + return nil, err + } + + return exprAdapter{expr}, nil +} + +func (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { + b, err := os.ReadFile(identifier) + if err != nil { + return nil, []error{errors.Wrap(err, identifier)} + } + rgs, errs := g.parseRules(b) + for i := range errs { + errs[i] = errors.Wrap(errs[i], identifier) + } + return rgs, errs +} + +func (GroupLoader) parseRules(content []byte) (*rulefmt.RuleGroups, []error) { + var ( + groups rulefmt.RuleGroups + errs []error + ) + + decoder := yaml.NewDecoder(bytes.NewReader(content)) + decoder.KnownFields(true) + + if err := decoder.Decode(&groups); err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return nil, errs + } + + return &groups, ValidateGroups(groups.Groups...) +} + +type CachingGroupLoader struct { + loader rules.GroupLoader + cache map[string]*rulefmt.RuleGroups + mtx sync.RWMutex +} + +func NewCachingGroupLoader(l rules.GroupLoader) *CachingGroupLoader { + return &CachingGroupLoader{ + loader: l, + cache: make(map[string]*rulefmt.RuleGroups), + } +} + +func (l *CachingGroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { + groups, errs := l.loader.Load(identifier) + if errs != nil { + return nil, errs + } + + l.mtx.Lock() + defer l.mtx.Unlock() + + l.cache[identifier] = groups + + return groups, nil +} + +func (l *CachingGroupLoader) Prune(toKeep []string) { + keep := make(map[string]struct{}, len(toKeep)) + for _, f := range toKeep { + keep[f] = struct{}{} + } + + l.mtx.Lock() + defer l.mtx.Unlock() + + for key := range l.cache { + if _, ok := keep[key]; !ok { + delete(l.cache, key) + } + } +} + +func (l *CachingGroupLoader) AlertingRules() []rulefmt.Rule { + l.mtx.RLock() + defer l.mtx.RUnlock() + + var rules []rulefmt.Rule + for _, group := range l.cache { + for _, g := range group.Groups { + for _, rule := range g.Rules { + rules = append(rules, rulefmt.Rule{ + Record: rule.Record.Value, + Alert: rule.Alert.Value, + Expr: rule.Expr.Value, + For: rule.For, + Labels: rule.Labels, + Annotations: rule.Annotations, + }) + } + } + } + + return rules +} + +func (l *CachingGroupLoader) Parse(query string) (parser.Expr, error) { + return l.loader.Parse(query) +} diff --git a/pkg/ruler/grouploader_test.go b/pkg/ruler/grouploader_test.go new file mode 100644 index 000000000000..0b334079ebbe --- /dev/null +++ b/pkg/ruler/grouploader_test.go @@ -0,0 +1,376 @@ +package ruler + +import ( + "fmt" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/rulefmt" + "github.com/prometheus/prometheus/promql/parser" + 
"github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" + "os" + "strings" + "testing" +) + +func Test_GroupLoader(t *testing.T) { + for _, tc := range []struct { + desc string + data string + match string + }{ + { + desc: "load correctly", + data: ` +groups: + - name: testgrp2 + interval: 0s + rules: + - alert: HTTPCredentialsLeaked + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail empty groupname", + match: "Groupname must not be empty", + data: ` +groups: + - name: + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail duplicate grps", + match: "repeated in the same file", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail record & alert", + match: "only one of 'record' and 'alert' must be set", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + record: doublevision + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail neither record nor alert", + match: "one of 'record' or 'alert' must be set", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail empty expr", + match: "field 'expr' must be set in rule", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail bad expr", + match: "could not parse expression", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: garbage + for: 2m + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail annotations in recording rule", + match: "invalid field 'annotations' in recording rule", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - record: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + labels: + severity: page + annotations: + summary: High request latency +`, + }, + { + desc: "fail for in recording rule", + match: "invalid field 'for' in recording rule", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - record: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page +`, + }, + { + desc: "fail recording rule 
name", + match: "invalid recording rule name:", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - record: 'Hi.ghThroughputLogStreams' + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) +`, + }, + { + desc: "fail invalid label name", + match: "invalid label name:", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + 'se.verity': page + annotations: + summary: High request latency +`, + }, + { + desc: "fail invalid annotation", + match: "invalid annotation name:", + data: ` +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + 's.ummary': High request latency +`, + }, + { + desc: "unknown fields", + match: "field unknown not found", + data: ` +unknown: true +groups: + - name: grp1 + interval: 0s + rules: + - alert: HighThroughputLogStreams + expr: sum by (cluster, job, pod) (rate({namespace=~"%s"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0) + for: 2m + labels: + severity: page + annotations: + 's.ummary': High request latency +`, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + var loader GroupLoader + f, err := os.CreateTemp(os.TempDir(), "rules") + require.Nil(t, err) + defer os.Remove(f.Name()) + err = os.WriteFile(f.Name(), []byte(tc.data), 0777) + require.Nil(t, err) + + _, errs := loader.Load(f.Name()) + if tc.match != "" { + require.NotNil(t, errs) + var found bool + for _, err := range errs { + found = found || strings.Contains(err.Error(), tc.match) + } + if !found { + fmt.Printf("\nerrors did not contain desired (%s): %v", tc.match, errs) + } + require.Equal(t, true, found) + } else { + require.Nil(t, errs) + } + }) + } +} + +func TestCachingGroupLoader(t *testing.T) { + t.Run("it caches rules as they are loaded from the underlying loader", func(t *testing.T) { + l := newFakeGroupLoader() + l.ruleGroups = map[string]*rulefmt.RuleGroups{ + "filename1": ruleGroup1, + "filename2": ruleGroup2, + } + + cl := NewCachingGroupLoader(l) + + groups, errs := cl.Load("filename1") + require.Nil(t, errs) + require.Equal(t, groups, ruleGroup1) + + rules := cl.AlertingRules() + require.Equal(t, rulefmt.Rule{Alert: "alert-1-name"}, rules[0]) + + groups, errs = cl.Load("filename2") + require.Nil(t, errs) + require.Equal(t, groups, ruleGroup2) + + rules = cl.AlertingRules() + require.ElementsMatch(t, []rulefmt.Rule{{Alert: "alert-1-name"}, {Alert: "alert-2-name"}}, rules) + }) + + t.Run("it doesn't cache rules when the loader has an error", func(t *testing.T) { + l := newFakeGroupLoader() + l.loadErrs = []error{errors.New("something bad")} + l.ruleGroups = map[string]*rulefmt.RuleGroups{ + "filename1": ruleGroup1, + "filename2": ruleGroup2, + } + + cl := NewCachingGroupLoader(l) + + groups, errs := cl.Load("filename1") + require.Equal(t, l.loadErrs, errs) + require.Nil(t, groups) + + rules := cl.AlertingRules() + require.Len(t, rules, 0) + }) + + t.Run("it removes files that are not in the list", func(t *testing.T) { + l := newFakeGroupLoader() + l.ruleGroups = map[string]*rulefmt.RuleGroups{ + "filename1": ruleGroup1, + "filename2": ruleGroup2, + } + + cl := NewCachingGroupLoader(l) + + _, errs := cl.Load("filename1") + require.Nil(t, errs) + + _, errs = cl.Load("filename2") + require.Nil(t, 
errs) + + cl.Prune([]string{"filename2"}) + + rules := cl.AlertingRules() + require.Len(t, rules, 1) + require.Equal(t, rulefmt.Rule{Alert: "alert-2-name"}, rules[0]) + }) +} + +func newFakeGroupLoader() *fakeGroupLoader { + return &fakeGroupLoader{ + ruleGroups: make(map[string]*rulefmt.RuleGroups), + } +} + +type fakeGroupLoader struct { + ruleGroups map[string]*rulefmt.RuleGroups + loadErrs []error + expr parser.Expr + parseErr error +} + +func (gl *fakeGroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { + return gl.ruleGroups[identifier], gl.loadErrs +} + +func (gl *fakeGroupLoader) Parse(query string) (parser.Expr, error) { + return gl.expr, gl.parseErr +} + +var ( + ruleGroup1 = &rulefmt.RuleGroups{ + Groups: []rulefmt.RuleGroup{ + { + Rules: []rulefmt.RuleNode{ + {Alert: yaml.Node{Value: "alert-1-name"}}, + }, + }, + }, + } + ruleGroup2 = &rulefmt.RuleGroups{ + Groups: []rulefmt.RuleGroup{ + { + Rules: []rulefmt.RuleNode{ + {Alert: yaml.Node{Value: "alert-2-name"}}, + }, + }, + }, + } +) diff --git a/pkg/ruler/memstore.go b/pkg/ruler/memstore.go index 38b56e3a2d24..998299f66894 100644 --- a/pkg/ruler/memstore.go +++ b/pkg/ruler/memstore.go @@ -3,6 +3,7 @@ package ruler import ( "context" "errors" + "github.com/prometheus/prometheus/model/rulefmt" "sync" "time" @@ -57,7 +58,7 @@ func newMemstoreMetrics(r prometheus.Registerer) *memstoreMetrics { } type RuleIter interface { - AlertingRules() []*rules.AlertingRule + AlertingRules() []rulefmt.Rule } type MemStore struct { @@ -136,7 +137,7 @@ func (m *MemStore) run() { m.mtx.Lock() holdDurs := make(map[string]time.Duration) for _, rule := range m.mgr.AlertingRules() { - holdDurs[rule.Name()] = rule.HoldDuration() + holdDurs[rule.Alert] = time.Duration(rule.For) } for ruleKey, cache := range m.rules { @@ -200,18 +201,8 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m return storage.NoopSeriesSet() } - var rule *rules.AlertingRule - - // go fetch the rule via the alertname - for _, x := range m.mgr.AlertingRules() { - if x.Name() == ruleKey { - rule = x - break - } - } - - // should not happen - if rule == nil { + rule, ok := m.findRule(ruleKey) + if !ok { level.Error(m.logger).Log("msg", "failure trying to restore for state for untracked alerting rule", "name", ruleKey) return storage.NoopSeriesSet() } @@ -249,8 +240,9 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m // see if alert condition had any inhabitants at ts-forDuration. We can assume it's still firing because // that's the only condition under which this is queried (via RestoreForState). 
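Stepping back to the CachingGroupLoader added earlier in this change: it remembers the last successfully loaded rule groups per file, and CachingRulesManager.Update calls Prune afterwards so entries for files the manager dropped are evicted. A compact, dependency-free sketch of that load-then-prune shape (plain strings stand in for *rulefmt.RuleGroups):

package main

import (
	"fmt"
	"sync"
)

// cachingLoader caches whatever the wrapped load function returned, keyed by
// file name; nothing is cached when the load fails.
type cachingLoader struct {
	load  func(name string) (string, error)
	mtx   sync.RWMutex
	cache map[string]string
}

func newCachingLoader(load func(string) (string, error)) *cachingLoader {
	return &cachingLoader{load: load, cache: map[string]string{}}
}

func (c *cachingLoader) Load(name string) (string, error) {
	v, err := c.load(name)
	if err != nil {
		return "", err
	}
	c.mtx.Lock()
	c.cache[name] = v
	c.mtx.Unlock()
	return v, nil
}

// Prune keeps only the listed files, mirroring the call made after manager.Update.
func (c *cachingLoader) Prune(keep []string) {
	keepSet := make(map[string]struct{}, len(keep))
	for _, k := range keep {
		keepSet[k] = struct{}{}
	}
	c.mtx.Lock()
	defer c.mtx.Unlock()
	for k := range c.cache {
		if _, ok := keepSet[k]; !ok {
			delete(c.cache, k)
		}
	}
}

func main() {
	l := newCachingLoader(func(name string) (string, error) { return "groups from " + name, nil })
	l.Load("rules-1.yaml")
	l.Load("rules-2.yaml")
	l.Prune([]string{"rules-2.yaml"})
	fmt.Println(len(l.cache)) // 1: only rules-2.yaml survives
}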
- checkTime := m.ts.Add(-rule.HoldDuration()) - vec, err := m.queryFunc(m.ctx, rule.Query().String(), checkTime) + holDuration := time.Duration(rule.For) + checkTime := m.ts.Add(-holDuration) + vec, err := m.queryFunc(m.ctx, rule.Expr, checkTime) if err != nil { level.Info(m.logger).Log("msg", "error querying for rule", "rule", ruleKey, "err", err.Error()) m.metrics.evaluations.WithLabelValues(statusFailure, m.userID).Inc() @@ -267,7 +259,7 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m ts := util.TimeToMillis(m.ts) forStateVec = append(forStateVec, promql.Sample{ - Metric: ForStateMetric(smpl.Metric, rule.Name()), + Metric: ForStateMetric(smpl.Metric, rule.Alert), Point: promql.Point{ T: ts, V: float64(checkTime.Unix()), @@ -295,6 +287,16 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m ) } +func (m *memStoreQuerier) findRule(name string) (rulefmt.Rule, bool) { + // go fetch the rule via the alertname + for _, rule := range m.mgr.AlertingRules() { + if rule.Alert == name { + return rule, true + } + } + return rulefmt.Rule{}, false +} + // LabelValues returns all potential values for a label name. func (*memStoreQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, errors.New("unimplemented") diff --git a/pkg/ruler/memstore_test.go b/pkg/ruler/memstore_test.go index 8eff45b5db7c..9a9ed03625f4 100644 --- a/pkg/ruler/memstore_test.go +++ b/pkg/ruler/memstore_test.go @@ -2,13 +2,14 @@ package ruler import ( "context" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/rulefmt" "testing" "time" "github.com/go-kit/log" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" @@ -16,11 +17,6 @@ import ( "github.com/grafana/loki/pkg/util" ) -var ( - NilMetrics = newMemstoreMetrics(nil) - NilLogger = log.NewNopLogger() -) - const ruleName = "testrule" func labelsToMatchers(ls labels.Labels) (res []*labels.Matcher) { @@ -30,29 +26,24 @@ func labelsToMatchers(ls labels.Labels) (res []*labels.Matcher) { return res } -type MockRuleIter []*rules.AlertingRule +type MockRuleIter []rulefmt.Rule -func (xs MockRuleIter) AlertingRules() []*rules.AlertingRule { return xs } +func (xs MockRuleIter) AlertingRules() []rulefmt.Rule { return xs } func testStore(queryFunc rules.QueryFunc) *MemStore { - return NewMemStore("test", queryFunc, NilMetrics, time.Minute, NilLogger) + return NewMemStore("test", queryFunc, newMemstoreMetrics(nil), time.Minute, log.NewNopLogger()) } func TestSelectRestores(t *testing.T) { forDuration := time.Minute - ars := []*rules.AlertingRule{ - rules.NewAlertingRule( - ruleName, - &parser.StringLiteral{Val: "unused"}, - forDuration, - labels.FromMap(map[string]string{"foo": "bar"}), - nil, - nil, - "", - false, - NilLogger, - ), + ars := []rulefmt.Rule{ + { + Alert: ruleName, + Expr: "unused", + For: model.Duration(forDuration), + Labels: map[string]string{"foo": "bar"}, + }, } callCount := 0 @@ -103,7 +94,7 @@ func TestSelectRestores(t *testing.T) { require.Equal(t, true, sset.Next()) require.Equal(t, ls, sset.At().Labels()) - iter := sset.At().Iterator() + iter := sset.At().Iterator(nil) require.Equal(t, chunkenc.ValFloat, iter.Next()) ts, v := iter.At() require.Equal(t, now, ts) @@ -120,7 +111,7 @@ func 
TestSelectRestores(t *testing.T) { sset = q.Select(false, nil, labelsToMatchers(ls)...) require.Equal(t, true, sset.Next()) require.Equal(t, ls, sset.At().Labels()) - iter = sset.At().Iterator() + iter = sset.At().Iterator(iter) require.Equal(t, chunkenc.ValFloat, iter.Next()) ts, v = iter.At() require.Equal(t, now, ts) @@ -141,18 +132,13 @@ func TestSelectRestores(t *testing.T) { } func TestMemstoreStart(t *testing.T) { - ars := []*rules.AlertingRule{ - rules.NewAlertingRule( - ruleName, - &parser.StringLiteral{Val: "unused"}, - time.Minute, - labels.FromMap(map[string]string{"foo": "bar"}), - nil, - nil, - "", - false, - NilLogger, - ), + ars := []rulefmt.Rule{ + { + Alert: ruleName, + Expr: "unused", + For: model.Duration(time.Minute), + Labels: map[string]string{"foo": "bar"}, + }, } fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { @@ -179,18 +165,13 @@ func TestMemStoreStopBeforeStart(t *testing.T) { } func TestMemstoreBlocks(t *testing.T) { - ars := []*rules.AlertingRule{ - rules.NewAlertingRule( - ruleName, - &parser.StringLiteral{Val: "unused"}, - time.Minute, - labels.FromMap(map[string]string{"foo": "bar"}), - nil, - nil, - "", - false, - NilLogger, - ), + ars := []rulefmt.Rule{ + { + Alert: ruleName, + Expr: "unused", + For: model.Duration(time.Minute), + Labels: map[string]string{"foo": "bar"}, + }, } fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { @@ -217,5 +198,4 @@ func TestMemstoreBlocks(t *testing.T) { case <-time.After(time.Millisecond): t.FailNow() } - } diff --git a/pkg/ruler/registry.go b/pkg/ruler/registry.go index f87409e6381a..eab7005b76ff 100644 --- a/pkg/ruler/registry.go +++ b/pkg/ruler/registry.go @@ -371,7 +371,7 @@ func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, errNotReady } -func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { +func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, errNotReady } func (n notReadyAppender) Commit() error { return errNotReady } @@ -388,7 +388,7 @@ func (n discardingAppender) AppendExemplar(ref storage.SeriesRef, l labels.Label func (n discardingAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { return 0, nil } -func (n discardingAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) { +func (n discardingAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, nil } func (n discardingAppender) Commit() error { return nil } diff --git a/pkg/ruler/storage/instance/instance_test.go b/pkg/ruler/storage/instance/instance_test.go index e1c99a3ae42a..2c223af517c4 100644 --- a/pkg/ruler/storage/instance/instance_test.go +++ b/pkg/ruler/storage/instance/instance_test.go @@ -264,7 +264,7 @@ func (a *mockAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m return 0, nil } -func (a *mockAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, 
error) { +func (a *mockAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { return 0, nil } diff --git a/pkg/ruler/storage/wal/util.go b/pkg/ruler/storage/wal/util.go index ade23a037f3e..ba5e4899be39 100644 --- a/pkg/ruler/storage/wal/util.go +++ b/pkg/ruler/storage/wal/util.go @@ -132,6 +132,11 @@ func (c *walDataCollector) AppendHistograms(histograms []record.RefHistogramSamp return true } +func (c *walDataCollector) AppendFloatHistograms(histograms []record.RefFloatHistogramSample) bool { + // TODO: support native histograms + return true +} + func (c *walDataCollector) UpdateSeriesSegment(series []record.RefSeries, index int) {} func (c *walDataCollector) SeriesReset(_ int) {} diff --git a/pkg/ruler/storage/wal/wal.go b/pkg/ruler/storage/wal/wal.go index b82b5f2cd5ab..7ee7d776d018 100644 --- a/pkg/ruler/storage/wal/wal.go +++ b/pkg/ruler/storage/wal/wal.go @@ -636,7 +636,7 @@ func (a *appender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metada return 0, nil } -func (a *appender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram) (storage.SeriesRef, error) { +func (a *appender) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { // TODO: support native histograms return 0, nil } diff --git a/pkg/sizing/node.go b/pkg/sizing/node.go index 0f6d7405209a..559d840356ca 100644 --- a/pkg/sizing/node.go +++ b/pkg/sizing/node.go @@ -79,4 +79,27 @@ var NodeTypesByProvider = map[string]map[string]NodeType{ writePod: StandardWrite, }, }, + "OVHcloud": { + "b2-30": { + name: "b2-30", + cores: 8, + memoryGB: 30, + readPod: StandardRead, + writePod: StandardWrite, + }, + "b2-60": { + name: "b2-60", + cores: 16, + memoryGB: 60, + readPod: StandardRead, + writePod: StandardWrite, + }, + "b2-120": { + name: "b2-120", + cores: 32, + memoryGB: 120, + readPod: StandardRead, + writePod: StandardWrite, + }, + }, } diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go index 51148271b8df..39efd52184d8 100644 --- a/pkg/storage/bucket/s3/bucket_client.go +++ b/pkg/storage/bucket/s3/bucket_client.go @@ -7,6 +7,11 @@ import ( "github.com/thanos-io/objstore/providers/s3" ) +const ( + // Applied to PUT operations to denote the desired storage class for S3 Objects + awsStorageClassHeader = "X-Amz-Storage-Class" +) + // NewBucketClient creates a new S3 bucket client func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { s3Cfg, err := newS3Config(cfg) @@ -34,13 +39,15 @@ func newS3Config(cfg Config) (s3.Config, error) { } return s3.Config{ - Bucket: cfg.BucketName, - Endpoint: cfg.Endpoint, - Region: cfg.Region, - AccessKey: cfg.AccessKeyID, - SecretKey: cfg.SecretAccessKey.String(), - Insecure: cfg.Insecure, - SSEConfig: sseCfg, + Bucket: cfg.BucketName, + Endpoint: cfg.Endpoint, + Region: cfg.Region, + AccessKey: cfg.AccessKeyID, + SecretKey: cfg.SecretAccessKey.String(), + SessionToken: cfg.SessionToken.String(), + Insecure: cfg.Insecure, + SSEConfig: sseCfg, + PutUserMetadata: map[string]string{awsStorageClassHeader: cfg.StorageClass}, HTTPConfig: s3.HTTPConfig{ IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout), ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout), diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go index ff7585cc04fe..fb3bd1a08500 
100644 --- a/pkg/storage/bucket/s3/config.go +++ b/pkg/storage/bucket/s3/config.go @@ -13,6 +13,7 @@ import ( "github.com/thanos-io/objstore/providers/s3" bucket_http "github.com/grafana/loki/pkg/storage/bucket/http" + "github.com/grafana/loki/pkg/storage/common/aws" "github.com/grafana/loki/pkg/util" ) @@ -30,8 +31,9 @@ const ( ) var ( - supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2} - supportedSSETypes = []string{SSEKMS, SSES3} + supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2} + supportedSSETypes = []string{SSEKMS, SSES3} + errUnsupportedSignatureVersion = errors.New("unsupported signature version") errUnsupportedSSEType = errors.New("unsupported S3 SSE type") errInvalidSSEContext = errors.New("invalid S3 SSE encryption context") @@ -56,9 +58,11 @@ type Config struct { Region string `yaml:"region"` BucketName string `yaml:"bucket_name"` SecretAccessKey flagext.Secret `yaml:"secret_access_key"` + SessionToken flagext.Secret `yaml:"session_token"` AccessKeyID string `yaml:"access_key_id"` Insecure bool `yaml:"insecure"` SignatureVersion string `yaml:"signature_version"` + StorageClass string `yaml:"storage_class"` SSE SSEConfig `yaml:"sse"` HTTP HTTPConfig `yaml:"http"` @@ -73,11 +77,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "S3 access key ID") f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "S3 secret access key") + f.Var(&cfg.SessionToken, prefix+"s3.session-token", "S3 session token") f.StringVar(&cfg.BucketName, prefix+"s3.bucket-name", "", "S3 bucket name") f.StringVar(&cfg.Region, prefix+"s3.region", "", "S3 region. If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.") f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", "))) + f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", aws.StorageClassStandard, "The S3 storage class to use. 
Details can be found at https://aws.amazon.com/s3/storage-classes/.") cfg.SSE.RegisterFlagsWithPrefix(prefix+"s3.sse.", f) cfg.HTTP.RegisterFlagsWithPrefix(prefix, f) } @@ -88,6 +94,10 @@ func (cfg *Config) Validate() error { return errUnsupportedSignatureVersion } + if err := aws.ValidateStorageClass(cfg.StorageClass); err != nil { + return err + } + return cfg.SSE.Validate() } diff --git a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go index 91d924d6d46d..5e6b9f9545a2 100644 --- a/pkg/storage/bucket/s3/config_test.go +++ b/pkg/storage/bucket/s3/config_test.go @@ -2,7 +2,9 @@ package s3 import ( "encoding/base64" + "fmt" "net/http" + "strings" "testing" "time" @@ -12,11 +14,13 @@ import ( "gopkg.in/yaml.v2" bucket_http "github.com/grafana/loki/pkg/storage/bucket/http" + "github.com/grafana/loki/pkg/storage/common/aws" ) // defaultConfig should match the default flag values defined in RegisterFlagsWithPrefix. var defaultConfig = Config{ SignatureVersion: SignatureVersionV4, + StorageClass: aws.StorageClassStandard, HTTP: HTTPConfig{ Config: bucket_http.Config{ IdleConnTimeout: 90 * time.Second, @@ -53,6 +57,7 @@ secret_access_key: test-secret-access-key access_key_id: test-access-key-id insecure: true signature_version: test-signature-version +storage_class: test-storage-class sse: type: test-type kms_key_id: test-kms-key-id @@ -75,6 +80,7 @@ http: AccessKeyID: "test-access-key-id", Insecure: true, SignatureVersion: "test-signature-version", + StorageClass: "test-storage-class", SSE: SSEConfig{ Type: "test-type", KMSKeyID: "test-kms-key-id", @@ -218,3 +224,32 @@ func TestParseKMSEncryptionContext(t *testing.T) { assert.NoError(t, err) assert.Equal(t, expected, actual) } + +func TestConfig_Validate(t *testing.T) { + tests := map[string]struct { + cfg Config + expectedErr error + }{ + "should fail if invalid signature version is set": { + Config{SignatureVersion: "foo"}, + errUnsupportedSignatureVersion, + }, + "should pass if valid signature version is set": { + defaultConfig, + nil, + }, + "should fail if invalid storage class is set": { + Config{SignatureVersion: SignatureVersionV4, StorageClass: "foo"}, + fmt.Errorf("unsupported S3 storage class: foo. Supported values: %s", strings.Join(aws.SupportedStorageClasses, ", ")), + }, + "should pass if valid storage class is set": { + Config{SignatureVersion: SignatureVersionV4, StorageClass: aws.StorageClassStandardInfrequentAccess}, + nil, + }, + } + + for name, test := range tests { + actual := test.cfg.Validate() + assert.Equal(t, test.expectedErr, actual, name) + } +} diff --git a/pkg/storage/bucket/swift/config.go b/pkg/storage/bucket/swift/config.go index 4e838f9c38ca..a30dd7319e8c 100644 --- a/pkg/storage/bucket/swift/config.go +++ b/pkg/storage/bucket/swift/config.go @@ -9,6 +9,7 @@ import ( type Config struct { AuthVersion int `yaml:"auth_version"` AuthURL string `yaml:"auth_url"` + Internal bool `yaml:"internal"` Username string `yaml:"username"` UserDomainName string `yaml:"user_domain_name"` UserDomainID string `yaml:"user_domain_id"` @@ -36,6 +37,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.AuthVersion, prefix+"swift.auth-version", 0, "OpenStack Swift authentication API version. 
0 to autodetect.") f.StringVar(&cfg.AuthURL, prefix+"swift.auth-url", "", "OpenStack Swift authentication URL") + f.BoolVar(&cfg.Internal, prefix+"swift.internal", false, "Set this to true to use the internal OpenStack Swift endpoint URL") f.StringVar(&cfg.Username, prefix+"swift.username", "", "OpenStack Swift username.") f.StringVar(&cfg.UserDomainName, prefix+"swift.user-domain-name", "", "OpenStack Swift user's domain name.") f.StringVar(&cfg.UserDomainID, prefix+"swift.user-domain-id", "", "OpenStack Swift user's domain ID.") diff --git a/pkg/storage/chunk/client/alibaba/oss_object_client.go b/pkg/storage/chunk/client/alibaba/oss_object_client.go new file mode 100644 index 000000000000..71a6f13720c8 --- /dev/null +++ b/pkg/storage/chunk/client/alibaba/oss_object_client.go @@ -0,0 +1,164 @@ +package alibaba + +import ( + "context" + "flag" + "io" + "net/http" + "strconv" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/instrument" + + "github.com/grafana/loki/pkg/storage/chunk/client" +) + +const NoSuchKeyErr = "NoSuchKey" + +var ossRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "loki", + Name: "oss_request_duration_seconds", + Help: "Time spent doing OSS requests.", + Buckets: prometheus.ExponentialBuckets(0.005, 4, 7), +}, []string{"operation", "status_code"})) + +func init() { + ossRequestDuration.Register() +} + +type OssObjectClient struct { + defaultBucket *oss.Bucket +} + +// OssConfig is config for the OSS Chunk Client. +type OssConfig struct { + Bucket string `yaml:"bucket"` + Endpoint string `yaml:"endpoint"` + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` +} + +// RegisterFlags registers flags. +func (cfg *OssConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix registers flags with prefix. +func (cfg *OssConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Bucket, prefix+"oss.bucketname", "", "Name of OSS bucket.") + f.StringVar(&cfg.Endpoint, prefix+"oss.endpoint", "", "oss Endpoint to connect to.") + f.StringVar(&cfg.AccessKeyID, prefix+"oss.access-key-id", "", "alibabacloud Access Key ID") + f.StringVar(&cfg.SecretAccessKey, prefix+"oss.secret-access-key", "", "alibabacloud Secret Access Key") +} + +// NewOssObjectClient makes a new chunk.Client that writes chunks to OSS. +func NewOssObjectClient(ctx context.Context, cfg OssConfig) (client.ObjectClient, error) { + client, err := oss.New(cfg.Endpoint, cfg.AccessKeyID, cfg.SecretAccessKey) + if err != nil { + return nil, err + } + bucket, err := client.Bucket(cfg.Bucket) + if err != nil { + return nil, err + } + return &OssObjectClient{ + defaultBucket: bucket, + }, nil +} + +func (s *OssObjectClient) Stop() { +} + +// GetObject returns a reader and the size for the specified object key from the configured OSS bucket. 
+func (s *OssObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) { + var resp *oss.GetObjectResult + var options []oss.Option + err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var requestErr error + resp, requestErr = s.defaultBucket.DoGetObject(&oss.GetObjectRequest{ObjectKey: objectKey}, options) + if requestErr != nil { + return requestErr + } + return nil + }) + if err != nil { + return nil, 0, err + } + length := resp.Response.Headers.Get("Content-Length") + size, err := strconv.Atoi(length) + if err != nil { + return nil, 0, err + } + return resp.Response.Body, int64(size), err + +} + +// PutObject puts the specified bytes into the configured OSS bucket at the provided key +func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { + return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + if err := s.defaultBucket.PutObject(objectKey, object); err != nil { + return errors.Wrap(err, "failed to put oss object") + } + return nil + }) + +} + +// List implements chunk.ObjectClient. +func (s *OssObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { + var storageObjects []client.StorageObject + var commonPrefixes []client.StorageCommonPrefix + marker := oss.Marker("") + for { + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } + + objects, err := s.defaultBucket.ListObjects(oss.Prefix(prefix), oss.Delimiter(delimiter), marker) + if err != nil { + return nil, nil, errors.Wrap(err, "list alibaba oss bucket failed") + } + marker = oss.Marker(objects.NextMarker) + for _, object := range objects.Objects { + storageObjects = append(storageObjects, client.StorageObject{ + Key: object.Key, + ModifiedAt: object.LastModified, + }) + } + for _, object := range objects.CommonPrefixes { + if object != "" { + commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(object)) + } + } + if !objects.IsTruncated { + break + } + } + return storageObjects, commonPrefixes, nil +} + +// DeleteObject deletes the specified object key from the configured OSS bucket. +func (s *OssObjectClient) DeleteObject(ctx context.Context, objectKey string) error { + return instrument.CollectedRequest(ctx, "OSS.DeleteObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + err := s.defaultBucket.DeleteObject(objectKey) + if err != nil { + return err + } + return nil + }) +} + +// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations. 
+func (s *OssObjectClient) IsObjectNotFoundErr(err error) bool { + switch caseErr := err.(type) { + case oss.ServiceError: + if caseErr.Code == NoSuchKeyErr && caseErr.StatusCode == http.StatusNotFound { + return true + } + return false + default: + return false + } +} diff --git a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go index 4d3a23d557f2..ad3285aa3bf7 100644 --- a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go +++ b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go @@ -64,6 +64,7 @@ type DynamoDBConfig struct { ChunkGangSize int `yaml:"chunk_gang_size"` ChunkGetMaxParallelism int `yaml:"chunk_get_max_parallelism"` BackoffConfig backoff.Config `yaml:"backoff_config"` + KMSKeyID string `yaml:"kms_key_id"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -77,6 +78,7 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.BackoffConfig.MinBackoff, "dynamodb.min-backoff", 100*time.Millisecond, "Minimum backoff time") f.DurationVar(&cfg.BackoffConfig.MaxBackoff, "dynamodb.max-backoff", 50*time.Second, "Maximum backoff time") f.IntVar(&cfg.BackoffConfig.MaxRetries, "dynamodb.max-retries", 20, "Maximum number of times to retry an operation") + f.StringVar(&cfg.KMSKeyID, "dynamodb.kms-key-id", "", "KMS key used for encrypting DynamoDB items. DynamoDB will use an Amazon owned KMS key if not provided.") cfg.Metrics.RegisterFlags(f) } diff --git a/pkg/storage/chunk/client/aws/dynamodb_table_client.go b/pkg/storage/chunk/client/aws/dynamodb_table_client.go index 45c6d683ee82..21ae7fd58eae 100644 --- a/pkg/storage/chunk/client/aws/dynamodb_table_client.go +++ b/pkg/storage/chunk/client/aws/dynamodb_table_client.go @@ -39,6 +39,7 @@ type dynamoTableClient struct { callManager callManager autoscale autoscale metrics *dynamoDBMetrics + kmsKeyID string } // NewDynamoDBTableClient makes a new DynamoTableClient. 
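Taken together, the S3 and DynamoDB options introduced in this change surface as new user-facing config fields. The snippet below is only an illustration of where they might sit in a Loki config: the `storage_class`, `session_token`, and `kms_key_id` field names come from this diff, while the exact `storage_config.aws` nesting, the bucket URL, the ARN, and the token value are assumptions/placeholders.

```yaml
# Illustrative placement only; field names come from this diff, the nesting
# and all values are placeholders.
storage_config:
  aws:
    s3: s3://us-east-1/loki-chunks            # placeholder bucket URL
    storage_class: STANDARD_IA                # new: must be one of SupportedStorageClasses
    session_token: "<temporary-sts-token>"    # new: optional STS session token
    dynamodb:
      kms_key_id: arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID  # new: KMS key for table encryption
```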
@@ -66,6 +67,7 @@ func NewDynamoDBTableClient(cfg DynamoDBConfig, reg prometheus.Registerer) (inde callManager: callManager, autoscale: autoscale, metrics: newMetrics(reg), + kmsKeyID: cfg.KMSKeyID, }, nil } @@ -161,6 +163,15 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc config.TableDes } } + if d.kmsKeyID != "" { + sseSpecification := &dynamodb.SSESpecification{ + Enabled: aws.Bool(true), + SSEType: aws.String(dynamodb.SSETypeKms), + KMSMasterKeyId: aws.String(d.kmsKeyID), + } + input.SetSSESpecification(sseSpecification) + } + output, err := d.DynamoDB.CreateTableWithContext(ctx, input) if err != nil { return err diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go index 7b23cecbb7df..a1f1b25f0da2 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client.go @@ -33,6 +33,7 @@ import ( bucket_s3 "github.com/grafana/loki/pkg/storage/bucket/s3" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" + storageawscommon "github.com/grafana/loki/pkg/storage/common/aws" "github.com/grafana/loki/pkg/util" ) @@ -44,6 +45,7 @@ const ( var ( supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2} errUnsupportedSignatureVersion = errors.New("unsupported signature version") + errUnsupportedStorageClass = errors.New("unsupported S3 storage class") ) var s3RequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -71,10 +73,12 @@ type S3Config struct { Region string `yaml:"region"` AccessKeyID string `yaml:"access_key_id"` SecretAccessKey flagext.Secret `yaml:"secret_access_key"` + SessionToken flagext.Secret `yaml:"session_token"` Insecure bool `yaml:"insecure"` SSEEncryption bool `yaml:"sse_encryption"` HTTPConfig HTTPConfig `yaml:"http_config"` SignatureVersion string `yaml:"signature_version"` + StorageClass string `yaml:"storage_class"` SSEConfig bucket_s3.SSEConfig `yaml:"sse"` BackoffConfig backoff.Config `yaml:"backoff_config" doc:"description=Configures back off when S3 get Object."` @@ -105,6 +109,7 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.Region, prefix+"s3.region", "", "AWS region to use.") f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "AWS Access Key ID") f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "AWS Secret Access Key") + f.Var(&cfg.SessionToken, prefix+"s3.session-token", "AWS Session Token") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "Disable https on s3 connection.") // TODO Remove in Cortex 1.10.0 @@ -117,6 +122,7 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.BoolVar(&cfg.HTTPConfig.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "Set to true to skip verifying the certificate chain and hostname.") f.StringVar(&cfg.HTTPConfig.CAFile, prefix+"s3.http.ca-file", "", "Path to the trusted CA file that signed the SSL certificate of the S3 endpoint.") f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", "))) + f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", storageawscommon.StorageClassStandard, fmt.Sprintf("The S3 storage class which objects will use. 
Supported values are: %s.", strings.Join(storageawscommon.SupportedStorageClasses, ", "))) f.DurationVar(&cfg.BackoffConfig.MinBackoff, prefix+"s3.min-backoff", 100*time.Millisecond, "Minimum backoff time when s3 get Object") f.DurationVar(&cfg.BackoffConfig.MaxBackoff, prefix+"s3.max-backoff", 3*time.Second, "Maximum backoff time when s3 get Object") @@ -128,7 +134,8 @@ func (cfg *S3Config) Validate() error { if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) { return errUnsupportedSignatureVersion } - return nil + + return storageawscommon.ValidateStorageClass(cfg.StorageClass) } type S3ObjectClient struct { @@ -243,7 +250,7 @@ func buildS3Client(cfg S3Config, hedgingCfg hedging.Config, hedging bool) (*s3.S } if cfg.AccessKeyID != "" && cfg.SecretAccessKey.String() != "" { - creds := credentials.NewStaticCredentials(cfg.AccessKeyID, cfg.SecretAccessKey.String(), "") + creds := credentials.NewStaticCredentials(cfg.AccessKeyID, cfg.SecretAccessKey.String(), cfg.SessionToken.String()) s3Config = s3Config.WithCredentials(creds) } @@ -393,9 +400,10 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.Re func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { return instrument.CollectedRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { putObjectInput := &s3.PutObjectInput{ - Body: object, - Bucket: aws.String(a.bucketFromKey(objectKey)), - Key: aws.String(objectKey), + Body: object, + Bucket: aws.String(a.bucketFromKey(objectKey)), + Key: aws.String(objectKey), + StorageClass: aws.String(a.cfg.StorageClass), } if a.sseConfig != nil { diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go index 2e2193702c6c..00ec9eba4072 100644 --- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go +++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go @@ -176,5 +176,22 @@ secret_access_key: secret access key require.Equal(t, underTest.AccessKeyID, "access key id") require.Equal(t, underTest.SecretAccessKey.String(), "secret access key") + require.Equal(t, underTest.SessionToken.String(), "") + +} + +func Test_ConfigParsesCredentialsInlineWithSessionToken(t *testing.T) { + var underTest = S3Config{} + yamlCfg := ` +access_key_id: access key id +secret_access_key: secret access key +session_token: session token +` + err := yaml.Unmarshal([]byte(yamlCfg), &underTest) + require.NoError(t, err) + + require.Equal(t, underTest.AccessKeyID, "access key id") + require.Equal(t, underTest.SecretAccessKey.String(), "secret access key") + require.Equal(t, underTest.SessionToken.String(), "session token") } diff --git a/pkg/storage/chunk/client/openstack/swift_object_client.go b/pkg/storage/chunk/client/openstack/swift_object_client.go index e020e29ea4df..1211129ec16a 100644 --- a/pkg/storage/chunk/client/openstack/swift_object_client.go +++ b/pkg/storage/chunk/client/openstack/swift_object_client.go @@ -80,6 +80,7 @@ func createConnection(cfg SwiftConfig, hedgingCfg hedging.Config, hedging bool) c := &swift.Connection{ AuthVersion: cfg.AuthVersion, AuthUrl: cfg.AuthURL, + Internal: cfg.Internal, ApiKey: cfg.Password, UserName: cfg.Username, UserId: cfg.UserID, diff --git a/pkg/storage/common/aws/storage_class.go b/pkg/storage/common/aws/storage_class.go new file mode 100644 index 000000000000..b284c7326fbb --- /dev/null +++ b/pkg/storage/common/aws/storage_class.go @@ -0,0 +1,35 @@ +package 
aws + +import ( + "fmt" + "strings" + + "github.com/grafana/loki/pkg/util" +) + +const ( + + // S3 Storage Class options which define the data access, resiliency & cost requirements of objects + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#API_PutObject_RequestSyntax + StorageClassGlacier = "GLACIER" + StorageClassDeepArchive = "DEEP_ARCHIVE" + StorageClassGlacierInstantRetrieval = "GLACIER_IR" + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + StorageClassOneZoneInfrequentAccess = "ONEZONE_IA" + StorageClassOutposts = "OUTPOSTS" + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + StorageClassStandard = "STANDARD" + StorageClassStandardInfrequentAccess = "STANDARD_IA" +) + +var ( + SupportedStorageClasses = []string{StorageClassGlacier, StorageClassDeepArchive, StorageClassGlacierInstantRetrieval, StorageClassIntelligentTiering, StorageClassOneZoneInfrequentAccess, StorageClassOutposts, StorageClassReducedRedundancy, StorageClassStandard, StorageClassStandardInfrequentAccess} +) + +func ValidateStorageClass(storageClass string) error { + if !util.StringsContain(SupportedStorageClasses, storageClass) { + return fmt.Errorf("unsupported S3 storage class: %s. Supported values: %s", storageClass, strings.Join(SupportedStorageClasses, ", ")) + } + + return nil +} diff --git a/pkg/storage/common/aws/storage_class_test.go b/pkg/storage/common/aws/storage_class_test.go new file mode 100644 index 000000000000..372d7b7c230f --- /dev/null +++ b/pkg/storage/common/aws/storage_class_test.go @@ -0,0 +1,30 @@ +package aws + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateStorageClass(t *testing.T) { + tests := map[string]struct { + storageClass string + expectedError error + }{ + "should return error if storage class is invalid": { + "foo", + fmt.Errorf("unsupported S3 storage class: foo. 
Supported values: %s", strings.Join(SupportedStorageClasses, ", ")), + }, + "should not return error if storage class is within supported values": { + StorageClassStandardInfrequentAccess, + nil, + }, + } + + for name, test := range tests { + actual := ValidateStorageClass(test.storageClass) + assert.Equal(t, test.expectedError, actual, name) + } +} diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go index bcc0f39c6e56..1c49983ac600 100644 --- a/pkg/storage/config/schema_config.go +++ b/pkg/storage/config/schema_config.go @@ -22,6 +22,7 @@ import ( const ( // Supported storage clients + StorageTypeAlibabaCloud = "alibabacloud" StorageTypeAWS = "aws" StorageTypeAWSDynamo = "aws-dynamo" StorageTypeAzure = "azure" diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 1ce69523b715..b211710e3454 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -59,12 +60,13 @@ type StoreLimits interface { // NamedStores helps configure additional object stores from a given storage provider type NamedStores struct { - AWS map[string]aws.StorageConfig `yaml:"aws"` - Azure map[string]azure.BlobStorageConfig `yaml:"azure"` - BOS map[string]baidubce.BOSStorageConfig `yaml:"bos"` - Filesystem map[string]local.FSConfig `yaml:"filesystem"` - GCS map[string]gcp.GCSConfig `yaml:"gcs"` - Swift map[string]openstack.SwiftConfig `yaml:"swift"` + AWS map[string]aws.StorageConfig `yaml:"aws"` + Azure map[string]azure.BlobStorageConfig `yaml:"azure"` + BOS map[string]baidubce.BOSStorageConfig `yaml:"bos"` + Filesystem map[string]local.FSConfig `yaml:"filesystem"` + GCS map[string]gcp.GCSConfig `yaml:"gcs"` + AlibabaCloud map[string]alibaba.OssConfig `yaml:"alibabacloud"` + Swift map[string]openstack.SwiftConfig `yaml:"swift"` // contains mapping from named store reference name to store type storeType map[string]string `yaml:"-"` @@ -102,7 +104,12 @@ func (ns *NamedStores) populateStoreType() error { } ns.storeType[name] = config.StorageTypeAzure } - + for name := range ns.AlibabaCloud { + if err := checkForDuplicates(name); err != nil { + return err + } + ns.storeType[name] = config.StorageTypeAlibabaCloud + } for name := range ns.BOS { if err := checkForDuplicates(name); err != nil { return err @@ -158,6 +165,7 @@ func (ns *NamedStores) validate() error { // Config chooses which storage client to use. 
type Config struct { + AlibabaStorageConfig alibaba.OssConfig `yaml:"alibabacloud"` AWSStorageConfig aws.StorageConfig `yaml:"aws"` AzureStorageConfig azure.BlobStorageConfig `yaml:"azure"` BOSStorageConfig baidubce.BOSStorageConfig `yaml:"bos"` @@ -334,6 +342,12 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clie return nil, err } return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil + case config.StorageTypeAlibabaCloud: + c, err := alibaba.NewOssObjectClient(context.Background(), cfg.AlibabaStorageConfig) + if err != nil { + return nil, err + } + return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil case config.StorageTypeBOS: c, err := NewObjectClient(name, cfg, clientMetrics) if err != nil { @@ -467,6 +481,16 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie } return aws.NewS3ObjectClient(s3Cfg, cfg.Hedging) + case config.StorageTypeAlibabaCloud: + ossCfg := cfg.AlibabaStorageConfig + if namedStore != "" { + var ok bool + ossCfg, ok = cfg.NamedStores.AlibabaCloud[namedStore] + if !ok { + return nil, fmt.Errorf("Unrecognized named alibabacloud oss storage config %s", name) + } + } + return alibaba.NewOssObjectClient(context.Background(), ossCfg) case config.StorageTypeGCS: gcsCfg := cfg.GCSConfig if namedStore != "" { diff --git a/pkg/usagestats/reporter.go b/pkg/usagestats/reporter.go index 3387e57a8de9..2ed35cc4261f 100644 --- a/pkg/usagestats/reporter.go +++ b/pkg/usagestats/reporter.go @@ -221,9 +221,6 @@ func (rep *Reporter) readSeedFile(ctx context.Context) (*ClusterSeed, error) { if err != nil { return nil, err } - if err != nil { - return nil, err - } defer func() { if err := reader.Close(); err != nil { level.Error(rep.logger).Log("msg", "failed to close reader", "err", err) diff --git a/pkg/util/loser/tree.go b/pkg/util/loser/tree.go new file mode 100644 index 000000000000..64aa84024540 --- /dev/null +++ b/pkg/util/loser/tree.go @@ -0,0 +1,128 @@ +// Loser tree, from https://en.wikipedia.org/wiki/K-way_merge_algorithm#Tournament_Tree + +package loser + +type Sequence interface { + Next() bool // Advances and returns true if there is a value at this new position. +} + +func New[E any, S Sequence](sequences []S, maxVal E, at func(S) E, less func(E, E) bool, close func(S)) *Tree[E, S] { + nSequences := len(sequences) + t := Tree[E, S]{ + maxVal: maxVal, + at: at, + less: less, + close: close, + nodes: make([]node[E, S], nSequences*2), + } + for i, s := range sequences { + t.nodes[i+nSequences].items = s + } + if nSequences > 0 { + t.nodes[0].index = -1 // flag to be initialized on first call to Next(). + } + return &t +} + +// Call the close function on all sequences that are still open. +func (t *Tree[E, S]) Close() { + for _, e := range t.nodes[len(t.nodes)/2 : len(t.nodes)] { + if e.index == -1 { + continue + } + t.close(e.items) + } +} + +// A loser tree is a binary tree laid out such that nodes N and N+1 have parent N/2. +// We store M leaf nodes in positions M...2M-1, and M-1 internal nodes in positions 1..M-1. +// Node 0 is a special node, containing the winner of the contest. +type Tree[E any, S Sequence] struct { + maxVal E + at func(S) E + less func(E, E) bool + close func(S) // Called when Next() returns false. + nodes []node[E, S] +} + +type node[E any, S Sequence] struct { + index int // This is the loser for all nodes except the 0th, where it is the winner. + value E // Value copied from the loser node, or winner for node 0. 
+ items S // Only populated for leaf nodes. +} + +func (t *Tree[E, S]) moveNext(index int) bool { + n := &t.nodes[index] + if n.items.Next() { + n.value = t.at(n.items) + return true + } + t.close(n.items) // Next() returned false; close it and mark as finished. + n.value = t.maxVal + n.index = -1 + return false +} + +func (t *Tree[E, S]) Winner() S { + return t.nodes[t.nodes[0].index].items +} + +func (t *Tree[E, S]) Next() bool { + if len(t.nodes) == 0 { + return false + } + if t.nodes[0].index == -1 { // If tree has not been initialized yet, do that. + t.initialize() + return t.nodes[t.nodes[0].index].index != -1 + } + t.moveNext(t.nodes[0].index) + t.replayGames(t.nodes[0].index) + return t.nodes[t.nodes[0].index].index != -1 +} + +func (t *Tree[E, S]) initialize() { + winners := make([]int, len(t.nodes)) + // Initialize leaf nodes as winners to start. + for i := len(t.nodes) / 2; i < len(t.nodes); i++ { + winners[i] = i + t.moveNext(i) // Must call Next on each item so that At() has a value. + } + for i := len(t.nodes) - 2; i > 0; i -= 2 { + // At each stage the winners play each other, and we record the loser in the node. + loser, winner := t.playGame(winners[i], winners[i+1]) + p := parent(i) + t.nodes[p].index = loser + t.nodes[p].value = t.nodes[loser].value + winners[p] = winner + } + t.nodes[0].index = winners[1] + t.nodes[0].value = t.nodes[winners[1]].value +} + +// Starting at pos, re-consider all values up to the root. +func (t *Tree[E, S]) replayGames(pos int) { + // At the start, pos is a leaf node, and is the winner at that level. + n := parent(pos) + for n != 0 { + if t.less(t.nodes[n].value, t.nodes[pos].value) { + loser := pos + // Record pos as the loser here, and the old loser is the new winner. + pos = t.nodes[n].index + t.nodes[n].index = loser + t.nodes[n].value = t.nodes[loser].value + } + n = parent(n) + } + // pos is now the winner; store it in node 0. 
+ t.nodes[0].index = pos + t.nodes[0].value = t.nodes[pos].value +} + +func (t *Tree[E, S]) playGame(a, b int) (loser, winner int) { + if t.less(t.nodes[a].value, t.nodes[b].value) { + return b, a + } + return a, b +} + +func parent(i int) int { return i / 2 } diff --git a/pkg/util/loser/tree_test.go b/pkg/util/loser/tree_test.go new file mode 100644 index 000000000000..7d06b07b481d --- /dev/null +++ b/pkg/util/loser/tree_test.go @@ -0,0 +1,120 @@ +package loser_test + +import ( + "math" + "testing" + + "github.com/grafana/loki/pkg/util/loser" +) + +type List struct { + list []uint64 + cur uint64 +} + +func NewList(list ...uint64) *List { + return &List{list: list} +} + +func (it *List) At() uint64 { + return it.cur +} + +func (it *List) Next() bool { + if len(it.list) > 0 { + it.cur = it.list[0] + it.list = it.list[1:] + return true + } + it.cur = 0 + return false +} + +func (it *List) Seek(val uint64) bool { + for it.cur < val && len(it.list) > 0 { + it.cur = it.list[0] + it.list = it.list[1:] + } + return len(it.list) > 0 +} + +func checkIterablesEqual[E any, S1 loser.Sequence, S2 loser.Sequence](t *testing.T, a S1, b S2, at1 func(S1) E, at2 func(S2) E, less func(E, E) bool) { + t.Helper() + count := 0 + for a.Next() { + count++ + if !b.Next() { + t.Fatalf("b ended before a after %d elements", count) + } + if less(at1(a), at2(b)) || less(at2(b), at1(a)) { + t.Fatalf("position %d: %v != %v", count, at1(a), at2(b)) + } + } + if b.Next() { + t.Fatalf("a ended before b after %d elements", count) + } +} + +func TestMerge(t *testing.T) { + tests := []struct { + name string + args []*List + want *List + }{ + { + name: "empty input", + want: NewList(), + }, + { + name: "one list", + args: []*List{NewList(1, 2, 3, 4)}, + want: NewList(1, 2, 3, 4), + }, + { + name: "two lists", + args: []*List{NewList(3, 4, 5), NewList(1, 2)}, + want: NewList(1, 2, 3, 4, 5), + }, + { + name: "two lists, first empty", + args: []*List{NewList(), NewList(1, 2)}, + want: NewList(1, 2), + }, + { + name: "two lists, second empty", + args: []*List{NewList(1, 2), NewList()}, + want: NewList(1, 2), + }, + { + name: "two lists b", + args: []*List{NewList(1, 2), NewList(3, 4, 5)}, + want: NewList(1, 2, 3, 4, 5), + }, + { + name: "two lists c", + args: []*List{NewList(1, 3), NewList(2, 4, 5)}, + want: NewList(1, 2, 3, 4, 5), + }, + { + name: "three lists", + args: []*List{NewList(1, 3), NewList(2, 4), NewList(5)}, + want: NewList(1, 2, 3, 4, 5), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + at := func(s *List) uint64 { return s.At() } + less := func(a, b uint64) bool { return a < b } + numCloses := 0 + close := func(s *List) { + numCloses++ + } + lt := loser.New(tt.args, math.MaxUint64, at, less, close) + at2 := func(s *loser.Tree[uint64, *List]) uint64 { return s.Winner().At() } + checkIterablesEqual(t, tt.want, lt, at, at2, less) + if numCloses != len(tt.args) { + t.Errorf("Expected %d closes, got %d", len(tt.args), numCloses) + } + }) + } +} diff --git a/pkg/util/metrics_helper_test.go b/pkg/util/metrics_helper_test.go index cf4fe9b16281..7ca74ab7b022 100644 --- a/pkg/util/metrics_helper_test.go +++ b/pkg/util/metrics_helper_test.go @@ -461,7 +461,7 @@ func TestFloat64PrecisionStability(t *testing.T) { // Randomise the seed but log it in case we need to reproduce the test on failure. seed := time.Now().UnixNano() - rand.Seed(seed) + randomGenerator := rand.New(rand.NewSource(seed)) t.Log("random generator seed:", seed) // Generate a large number of registries with different metrics each. 
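The test change above replaces the deprecated global `rand.Seed` with a dedicated `*rand.Rand`, which the following hunk then uses for every generated sample; this avoids mutating shared package-level state while keeping the run reproducible from the logged seed. A minimal standalone sketch of the same pattern (package name and printed values are illustrative only):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Randomise the seed but log it so a failing run can be reproduced.
	seed := time.Now().UnixNano()
	fmt.Println("random generator seed:", seed)

	// Use a local generator instead of seeding the deprecated global source.
	randomGenerator := rand.New(rand.NewSource(seed))
	for i := 0; i < 3; i++ {
		fmt.Println(randomGenerator.Float64())
	}
}
```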
@@ -472,22 +472,22 @@ func TestFloat64PrecisionStability(t *testing.T) { g := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{Name: "test_gauge"}, labelNames) for i := 0; i < cardinality; i++ { - g.WithLabelValues("a", strconv.Itoa(i)).Set(rand.Float64()) + g.WithLabelValues("a", strconv.Itoa(i)).Set(randomGenerator.Float64()) } c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{Name: "test_counter"}, labelNames) for i := 0; i < cardinality; i++ { - c.WithLabelValues("a", strconv.Itoa(i)).Add(rand.Float64()) + c.WithLabelValues("a", strconv.Itoa(i)).Add(randomGenerator.Float64()) } h := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{Name: "test_histogram", Buckets: []float64{0.1, 0.5, 1}}, labelNames) for i := 0; i < cardinality; i++ { - h.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) + h.WithLabelValues("a", strconv.Itoa(i)).Observe(randomGenerator.Float64()) } s := promauto.With(reg).NewSummaryVec(prometheus.SummaryOpts{Name: "test_summary"}, labelNames) for i := 0; i < cardinality; i++ { - s.WithLabelValues("a", strconv.Itoa(i)).Observe(rand.Float64()) + s.WithLabelValues("a", strconv.Itoa(i)).Observe(randomGenerator.Float64()) } registries.AddUserRegistry(strconv.Itoa(userID), reg) diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 8ed3f3db2ce5..b788327b4af3 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -11,7 +11,30 @@ Entries should be ordered as follows: Entries should include a reference to the pull request that introduced the change. -[//]: # ( : do not remove this line. Add your changelog bellow this line. This locator is used by CI pipeline to find the place where to put changelog entry.) +[//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries below this line.) + +## 4.7 + +- [CHANGE] **BREAKING** Rename `gel-license.jwt` property of secret `gel-secrets` to `license.jwt` on enterprise-logs chart. + +## 4.6.2 + +- [BUGFIX] Fix tokengen and provisioner secrets creation on enterprise-logs chart. + +## 4.6.1 + +- [FEATURE] Add `gateway.nginxConfig.customReadUrl`, `gateway.nginxConfig.customWriteUrl` and `gateway.nginxConfig.customBackendUrl` to override read/write/backend paths. +- [BUGFIX] Remove unreleased setting `useFederatedToken` from Azure configuration block. + +## 4.6 + +- [CHANGE] Bump Loki version to 2.7.3. Revert to 2 target simple scalable mode as default until third target ships in minor release. + +## 4.5.1 + +- [BUGFIX] Fix rendering of namespace in provisioner job. +- [ENHANCEMENT] Allow configuring `publishNotReadyAddresses` on the memberlist service. +- [BUGFIX] Correctly set `compactor_address` for 3 target scalable configuration. 
## 4.5 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 6bd6d0b336ef..56f81d647735 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,8 +3,8 @@ apiVersion: v2 name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application -appVersion: 2.7.2 -version: 4.5.0 +appVersion: 2.7.3 +version: 4.7.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 56d88e12e035..8c1c5f4c904c 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 4.5.0](https://img.shields.io/badge/Version-4.5.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.7.2](https://img.shields.io/badge/AppVersion-2.7.2-informational?style=flat-square) +![Version: 4.7.0](https://img.shields.io/badge/Version-4.7.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.7.3](https://img.shields.io/badge/AppVersion-2.7.3-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode diff --git a/production/helm/loki/ci/default-values.yaml b/production/helm/loki/ci/default-values.yaml index 2b8340643c39..c143b416be47 100644 --- a/production/helm/loki/ci/default-values.yaml +++ b/production/helm/loki/ci/default-values.yaml @@ -2,6 +2,8 @@ loki: commonConfig: replication_factor: 1 + image: + tag: "main-5e53303" read: replicas: 1 write: diff --git a/production/helm/loki/ci/enterprise.yaml b/production/helm/loki/ci/enterprise.yaml index f271dc563762..95f24946d32f 100644 --- a/production/helm/loki/ci/enterprise.yaml +++ b/production/helm/loki/ci/enterprise.yaml @@ -11,6 +11,8 @@ enterprise: loki: commonConfig: replication_factor: 1 + image: + tag: "main-5e53303" storage: type: local read: diff --git a/production/helm/loki/ci/ingress-values.yaml b/production/helm/loki/ci/ingress-values.yaml index 3d5fa688d1fd..adff785167fe 100644 --- a/production/helm/loki/ci/ingress-values.yaml +++ b/production/helm/loki/ci/ingress-values.yaml @@ -11,6 +11,8 @@ gateway: loki: commonConfig: replication_factor: 1 + image: + tag: "main-5e53303" read: replicas: 1 write: diff --git a/production/helm/loki/docs/examples/enterprise/README.md b/production/helm/loki/docs/examples/enterprise/README.md index 42004f1e287d..d28b48ed985a 100644 --- a/production/helm/loki/docs/examples/enterprise/README.md +++ b/production/helm/loki/docs/examples/enterprise/README.md @@ -1,20 +1,20 @@ ## Introduction -This example gives you an example or getting started overrides value file for deploying Loki (Enterprise Licensed) using the Simple Scalable architecture in GKE and using GCS +This example gives you a getting-started overrides values file for deploying Loki (Enterprise Licensed) using the Simple Scalable architecture in GKE and using GCS. ## Installation of Helm Chart -These instructions assume you have already have access to a Kubernetes cluster, GCS Bucket and GCP Service Account which has read/write permissions to that GCS Bucket. +These instructions assume you already have access to a Kubernetes cluster, GCS Bucket and GCP Service Account which has read/write permissions to that GCS Bucket. 
### Populate Secret Values -Populate the examples/enterprise-secrets.yaml so that: -- The gcp_service_account.json secret has the contents of your GCP Service Account JSON key -- The gel-license.jwt secret has the contents of your Grafana Enterprise Logs license key given to your by Grafana Labs +Populate the [enterprise-secrets.yaml](./enterprise-secrets.yaml) so that: +- The `gcp_service_account.json` secret has the contents of your GCP Service Account JSON key. +- The `license.jwt` secret has the contents of your Grafana Enterprise Logs license key given to you by Grafana Labs. -Deploy the secrets file to your k8s cluster. +Deploy the secrets file to your k8s cluster with the command: `kubectl apply -f enterprise-secrets.yaml` ### Configure the Helm Chart -Open examples/overides-enterprise-gcs.yaml and replace `{YOUR_GCS_BUCKET}` with the name of your GCS bucket. If there are other things you'd like to configure, view the core [Values.yaml file](https://github.com/grafana/helm-charts/blob/main/charts/loki-simple-scalable/values.yaml) and override anything else you need to within the overrides-enterprise-gcs.yaml file. +Open [overrides-enterprise-gcs.yaml](./overrides-enterprise-gcs.yaml) and replace `{YOUR_GCS_BUCKET}` with the name of your GCS bucket. If there are other things you'd like to configure, view the core [Values.yaml file](https://github.com/grafana/helm-charts/blob/main/charts/loki-simple-scalable/values.yaml) and override anything else you need to within the overrides-enterprise-gcs.yaml file. ### Install the Helm chart @@ -25,5 +25,4 @@ Open examples/overides-enterprise-gcs.yaml and replace `{YOUR_GCS_BUCKET}` with `kubectl --namespace {KUBERNETES_NAMESPACE} logs $POD_NAME loki | grep Token` -Take note of this token, you will need it when connecting Grafana Enterprise Logs to Grafana - +Take note of this token; you will need it when connecting Grafana Enterprise Logs to Grafana. 
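As a usage sketch to go with the updated example docs above: the repository URL comes from the chart's `home` field and the chart itself is named `loki`, while the release name and namespace below are placeholders and not part of this change.

```bash
# Illustrative install flow for the enterprise example (release name and
# namespace are placeholders).
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
kubectl apply -f enterprise-secrets.yaml
helm upgrade --install loki grafana/loki \
  --namespace loki --create-namespace \
  --values overrides-enterprise-gcs.yaml
```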
diff --git a/production/helm/loki/docs/examples/enterprise/enterprise-secrets.yaml b/production/helm/loki/docs/examples/enterprise/enterprise-secrets.yaml index 77266fa72ef9..698e94b44597 100644 --- a/production/helm/loki/docs/examples/enterprise/enterprise-secrets.yaml +++ b/production/helm/loki/docs/examples/enterprise/enterprise-secrets.yaml @@ -9,4 +9,4 @@ stringData: GCP_SERVICE_ACCOUNT_JSON_HERE } - gel-license.jwt: LICENSE_HERE \ No newline at end of file + license.jwt: LICENSE_HERE diff --git a/production/helm/loki/docs/examples/enterprise/overrides-enterprise-gcs.yaml b/production/helm/loki/docs/examples/enterprise/overrides-enterprise-gcs.yaml index 348b8b788f91..01210d309137 100644 --- a/production/helm/loki/docs/examples/enterprise/overrides-enterprise-gcs.yaml +++ b/production/helm/loki/docs/examples/enterprise/overrides-enterprise-gcs.yaml @@ -14,7 +14,7 @@ enterprise: secret: secretName: gel-secrets items: - - key: gel-license.jwt + - key: license.jwt path: license.jwt - key: gcp_service_account.json path: gcp_service_account.json @@ -43,7 +43,7 @@ write: secret: secretName: gel-secrets items: - - key: gel-license.jwt + - key: license.jwt path: license.jwt - key: gcp_service_account.json path: gcp_service_account.json @@ -60,7 +60,7 @@ read: secret: secretName: gel-secrets items: - - key: gel-license.jwt + - key: license.jwt path: license.jwt - key: gcp_service_account.json path: gcp_service_account.json @@ -77,7 +77,7 @@ gateway: secret: secretName: gel-secrets items: - - key: gel-license.jwt + - key: license.jwt path: license.jwt - key: gcp_service_account.json path: gcp_service_account.json diff --git a/production/helm/loki/src/helm-test/Dockerfile b/production/helm/loki/src/helm-test/Dockerfile index 5ffb228f70bc..3fdc486d0407 100644 --- a/production/helm/loki/src/helm-test/Dockerfile +++ b/production/helm/loki/src/helm-test/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18.5 as build +FROM golang:1.20.1 as build # build via Makefile target helm-test-image in root # Makefile. Building from this directory will not be @@ -7,7 +7,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make clean && make BUILD_IN_CONTAINER=false helm-test -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN apk add --update --no-cache ca-certificates=20220614-r0 COPY --from=build /src/loki/production/helm/loki/src/helm-test/helm-test /usr/bin/helm-test ENTRYPOINT [ "/usr/bin/helm-test" ] diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index ed783e1f56a1..4d0ca34e1a00 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -240,7 +240,6 @@ azure: {{- end }} container_name: {{ $.Values.loki.storage.bucketNames.chunks }} use_managed_identity: {{ .useManagedIdentity }} - use_federated_token: {{ .useFederatedToken }} {{- with .userAssignedId }} user_assigned_id: {{ . }} {{- end }} @@ -307,7 +306,6 @@ azure: {{- end }} container_name: {{ $.Values.loki.storage.bucketNames.ruler }} use_managed_identity: {{ .useManagedIdentity }} - use_federated_token: {{ .useFederatedToken }} {{- with .userAssignedId }} user_assigned_id: {{ . }} {{- end }} @@ -558,7 +556,7 @@ http { resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }}.; {{- with .Values.gateway.nginxConfig.httpSnippet }} - {{ . | nindent 2 }} + {{- tpl . 
$ | nindent 2 }} {{- end }} server { @@ -588,73 +586,87 @@ http { {{- $writeHost = include "loki.singleBinaryFullname" .}} {{- end }} + {{- $writeUrl := printf "http://%s.%s.svc.%s:3100" $writeHost .Release.Namespace .Values.global.clusterDomain }} + {{- $readUrl := printf "http://%s.%s.svc.%s:3100" $readHost .Release.Namespace .Values.global.clusterDomain }} + {{- $backendUrl := printf "http://%s.%s.svc.%s:3100" $backendHost .Release.Namespace .Values.global.clusterDomain }} + + {{- if .Values.gateway.nginxConfig.customWriteUrl }} + {{- $writeUrl = .Values.gateway.nginxConfig.customWriteUrl }} + {{- end }} + {{- if .Values.gateway.nginxConfig.customReadUrl }} + {{- $readUrl = .Values.gateway.nginxConfig.customReadUrl }} + {{- end }} + {{- if .Values.gateway.nginxConfig.customBackendUrl }} + {{- $backendUrl = .Values.gateway.nginxConfig.customBackendUrl }} + {{- end }} + location = /api/prom/push { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } location = /api/prom/tail { - proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $readUrl }}$request_uri; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; } location ~ /api/prom/.* { - proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $readUrl }}$request_uri; } location ~ /prometheus/api/v1/alerts.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location ~ /prometheus/api/v1/rules.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location ~ /ruler/.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location = /loki/api/v1/push { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } location = /loki/api/v1/tail { - proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $readUrl }}$request_uri; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; } location ~ /compactor/.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location ~ /distributor/.* { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } location ~ /ring { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } location ~ /ingester/.* { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } location ~ /store-gateway/.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } 
location ~ /query-scheduler/.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location ~ /scheduler/.* { - proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $backendUrl }}$request_uri; } location ~ /loki/api/.* { - proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $readUrl }}$request_uri; } location ~ /admin/api/.* { - proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri; + proxy_pass {{ $writeUrl }}$request_uri; } {{- with .Values.gateway.nginxConfig.serverSnippet }} @@ -674,3 +686,18 @@ enableServiceLinks: false {{- end -}} {{- end -}} {{- end -}} + +{{/* Determine compactor address based on target configuration */}} +{{- define "loki.compactorAddress" -}} +{{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) "true" -}} +{{- $compactorAddress := include "loki.backendFullname" . -}} +{{- if and $isSimpleScalable .Values.read.legacyReadTarget -}} +{{/* 2 target configuration */}} +{{- $compactorAddress = include "loki.readFullname" . -}} +{{- else if (not $isSimpleScalable) -}} +{{/* single binary */}} +{{- $compactorAddress = include "loki.singleBinaryFullname" . -}} +{{- end -}} +{{- printf "%s" $compactorAddress }} +{{- end }} + diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml index 14eac0179ded..7090b75340a0 100644 --- a/production/helm/loki/templates/backend/statefulset-backend.yaml +++ b/production/helm/loki/templates/backend/statefulset-backend.yaml @@ -40,6 +40,12 @@ spec: {{- end }} labels: {{- include "loki.backendSelectorLabels" . | nindent 8 }} + {{- with .Values.loki.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.backend.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} {{- with .Values.backend.selectorLabels }} {{- tpl (toYaml .) $ | nindent 8 }} {{- end }} @@ -99,6 +105,8 @@ spec: mountPath: /etc/loki/config - name: runtime-config mountPath: /etc/loki/runtime-config + - name: tmp + mountPath: /tmp - name: data mountPath: /var/loki {{- if .Values.enterprise.enabled }} @@ -123,6 +131,8 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: + - name: tmp + emptyDir: {} - name: config {{- if .Values.loki.existingSecretForConfig }} secret: diff --git a/production/helm/loki/templates/provisioner/job-provisioner.yaml b/production/helm/loki/templates/provisioner/job-provisioner.yaml index 626fa592337e..ae9037b33f7c 100644 --- a/production/helm/loki/templates/provisioner/job-provisioner.yaml +++ b/production/helm/loki/templates/provisioner/job-provisioner.yaml @@ -92,20 +92,30 @@ spec: - /bin/bash - -exuc - | + # In case, the admin resources have already been created, the provisioner job + # does not write the token files to the bootstrap mount. + # Therefore, secrets are only created if the respective token files exist. + # Note: the following bash commands should always return a success status code. + # Therefore, in case the token file does not exist, the first clause of the + # or-operation is successful. 
{{- range .Values.enterprise.provisioner.additionalTenants }} - kubectl --namespace "{{ .secretNamespace }}" create secret generic "{{ include "enterprise-logs.provisionedSecretPrefix" $ }}-{{ .name }}" \ - --from-literal=token-write="$(cat /bootstrap/token-write-{{ .name }})" \ - --from-literal=token-read="$(cat /bootstrap/token-read-{{ .name }})" + ! test -s /bootstrap/token-write-{{ .name }} || \ + kubectl --namespace "{{ .secretNamespace }}" create secret generic "{{ include "enterprise-logs.provisionedSecretPrefix" $ }}-{{ .name }}" \ + --from-literal=token-write="$(cat /bootstrap/token-write-{{ .name }})" \ + --from-literal=token-read="$(cat /bootstrap/token-read-{{ .name }})" {{- end }} {{- $namespace := $.Release.Namespace }} {{- with .Values.monitoring.selfMonitoring.tenant }} - kubectl --namespace "{{ $namespace }}" create secret generic "{{ include "enterprise-logs.selfMonitoringTenantSecret" $ }}" \ - --from-literal=username="{{ .name }}" \ - --from-literal=password="$(cat /bootstrap/token-self-monitoring)" - {{- if not (eq .secretNamespace $namespace) }} - kubectl --namespace "{{ .secretNamespace }}" create secret generic "{{ include "enterprise-logs.selfMonitoringTenantSecret" $ }}" \ - --from-literal=username="{{ .name }}" \ - --from-literal=password="$(cat /bootstrap/token-self-monitoring)" + {{- $secretNamespace := tpl .secretNamespace $ }} + ! test -s /bootstrap/token-self-monitoring || \ + kubectl --namespace "{{ $namespace }}" create secret generic "{{ include "enterprise-logs.selfMonitoringTenantSecret" $ }}" \ + --from-literal=username="{{ .name }}" \ + --from-literal=password="$(cat /bootstrap/token-self-monitoring)" + {{- if not (eq $secretNamespace $namespace) }} + ! test -s /bootstrap/token-self-monitoring || \ + kubectl --namespace "{{ $secretNamespace }}" create secret generic "{{ include "enterprise-logs.selfMonitoringTenantSecret" $ }}" \ + --from-literal=username="{{ .name }}" \ + --from-literal=password="$(cat /bootstrap/token-self-monitoring)" {{- end }} {{- end }} volumeMounts: diff --git a/production/helm/loki/templates/service-memberlist.yaml b/production/helm/loki/templates/service-memberlist.yaml index 3b371aa400df..ca1048597eeb 100644 --- a/production/helm/loki/templates/service-memberlist.yaml +++ b/production/helm/loki/templates/service-memberlist.yaml @@ -13,6 +13,9 @@ spec: port: 7946 targetPort: http-memberlist protocol: TCP + {{- with .Values.memberlist.service.publishNotReadyAddresses }} + publishNotReadyAddresses: {{ . }} + {{- end }} selector: {{- include "loki.selectorLabels" . | nindent 4 }} app.kubernetes.io/part-of: memberlist diff --git a/production/helm/loki/templates/tokengen/clusterrole-tokengen.yaml b/production/helm/loki/templates/tokengen/clusterrole-tokengen.yaml index 2ebfb1479b18..19dad8804bd6 100644 --- a/production/helm/loki/templates/tokengen/clusterrole-tokengen.yaml +++ b/production/helm/loki/templates/tokengen/clusterrole-tokengen.yaml @@ -17,5 +17,5 @@ metadata: rules: - apiGroups: [""] resources: ["secrets"] - verbs: ["create"] + verbs: ["create", "get", "patch"] {{- end }} diff --git a/production/helm/loki/templates/tokengen/job-tokengen.yaml b/production/helm/loki/templates/tokengen/job-tokengen.yaml index 670a8128874f..abc1946554f5 100644 --- a/production/helm/loki/templates/tokengen/job-tokengen.yaml +++ b/production/helm/loki/templates/tokengen/job-tokengen.yaml @@ -45,6 +45,10 @@ spec: image: {{ template "loki.image" . 
}} imagePullPolicy: {{ .Values.loki.image.pullPolicy }} args: + # The shared emptyDir exists only while the job is running, and is deleted once the job is completed. + # The tokengen generates a new admin token in case the 'token-file' file doesn't exist. + # As a result, subsequent executions of this tokengen job will generate new admin tokens. + # Note that previously generated tokens remain valid, as these remain present in the object storage. - -config.file=/etc/loki/config/config.yaml - -target={{ .Values.enterprise.tokengen.targetModule }} - -tokengen.token-file=/shared/admin-token @@ -79,10 +83,17 @@ spec: - /bin/bash - -euc - | - kubectl create secret generic "{{ include "enterprise-logs.adminTokenSecret" . }}" --from-file=token=/shared/admin-token + # Create or update admin token secrets generated by tokengen job + kubectl create secret generic "{{ include "enterprise-logs.adminTokenSecret" . }}" \ + --from-file=token=/shared/admin-token \ + --dry-run=client -o yaml \ + | kubectl apply -f - {{- with .Values.enterprise.adminToken.additionalNamespaces }} {{- range . }} - kubectl --namespace "{{ . }}" create secret generic "{{ include "enterprise-logs.adminTokenSecret" $ }}" --from-file=token=/shared/admin-token + kubectl --namespace "{{ . }}" create secret generic "{{ include "enterprise-logs.adminTokenSecret" $ }}" \ + --from-file=token=/shared/admin-token \ + --dry-run=client -o yaml \ + | kubectl apply -f - {{- end }} {{- end }} volumeMounts: diff --git a/production/helm/loki/templates/validate.yaml b/production/helm/loki/templates/validate.yaml index 49bf2d9c29fd..3a2e8ca79fad 100644 --- a/production/helm/loki/templates/validate.yaml +++ b/production/helm/loki/templates/validate.yaml @@ -25,4 +25,3 @@ {{- if and $isUsingFilesystem (and (eq $singleBinaryReplicas 0) $atLeastOneScalableReplica) }} {{- fail "Cannot run Scalable targets (backend, read, write) without an object storage backend."}} {{- end }} - diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 1e4bb9c9d103..fa0246621637 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -47,7 +47,7 @@ loki: # -- Overrides the image tag whose default is the chart's appVersion # TODO: needed for 3rd target backend functionality # revert to null or latest once this behavior is relased - tag: "main-5e53303" + tag: null # -- Docker image pull policy pullPolicy: IfNotPresent # -- Common annotations for all pods @@ -202,6 +202,8 @@ loki: # Should authentication be enabled auth_enabled: true + # -- Tenants list to be created on nginx htpasswd file, with name and password keys + tenants: [] # -- Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration. server: @@ -223,6 +225,7 @@ loki: commonConfig: path_prefix: /var/loki replication_factor: 3 + compactor_address: '{{ include "loki.compactorAddress" . }}' # -- Storage config. Providing this will automatically populate all necessary storage configs in the templated config. storage: @@ -248,7 +251,6 @@ loki: accountName: null accountKey: null useManagedIdentity: false - useFederatedToken: false userAssignedId: null requestTimeout: null filesystem: @@ -548,7 +550,9 @@ monitoring: # -- Additional ServiceMonitor labels labels: {} # -- ServiceMonitor scrape interval - interval: null + # Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at + # least 1/4 rate interval. 
+ interval: 15s # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s) scrapeTimeout: null # -- ServiceMonitor relabel configs to apply to samples before scraping @@ -811,7 +815,7 @@ read: # -- Whether or not to use the 2 target type simple scalable mode (read, write) or the # 3 target type (read, write, backend). Legacy refers to the 2 target type, so true will # run two targets, false will run 3 targets. - legacyReadTarget: false + legacyReadTarget: true # -- Additional CLI args for the read extraArgs: [] # -- Environment variables to add to the read pods @@ -870,6 +874,8 @@ backend: priorityClassName: null # -- Annotations for backend pods podAnnotations: {} + # -- Additional labels for each `backend` pod + podLabels: {} # -- Additional selector labels for each `backend` pod selectorLabels: {} # -- Labels for ingester service @@ -1040,6 +1046,11 @@ ingress: # - loki.example.com # secretName: loki-distributed-tls +# Configuration for the memberlist service +memberlist: + service: + publishNotReadyAddresses: false + # Configuration for the gateway gateway: # -- Specifies whether the gateway should be enabled @@ -1164,12 +1175,18 @@ gateway: username: null # -- The basic auth password for the gateway password: null - # -- Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function. + # -- Uses the specified users from the `loki.tenants` list to create the htpasswd file + # if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes # high CPU load. htpasswd: >- + {{ if .Values.loki.tenants }} + {{- range $t := .Values.loki.tenants }} + {{ htpasswd (required "All tenants must have a 'name' set" $t.name) (required "All tenants must have a 'password' set" $t.password) }} + {{- end }} + {{ else }} {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }} - + {{ end }} # -- Existing basic auth secret to use. Must contain '.htpasswd' existingSecret: null # Configures the readiness probe for the gateway @@ -1187,8 +1204,15 @@ gateway: '"$http_user_agent" "$http_x_forwarded_for"'; # -- Allows appending custom configuration to the server block serverSnippet: "" - # -- Allows appending custom configuration to the http block - httpSnippet: "" + # -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating + httpSnippet: >- + {{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }} + # -- Override Read URL + customReadUrl: null + # -- Override Write URL + customWriteUrl: null + # -- Override Backend URL + customBackendUrl: null # -- Config file contents for Nginx. 
Passed through the `tpl` function to allow templating # @default -- See values.yaml file: | diff --git a/production/ksonnet/loki/gateway.libsonnet b/production/ksonnet/loki/gateway.libsonnet index 1b14faab6f03..b5b1eb2401ca 100644 --- a/production/ksonnet/loki/gateway.libsonnet +++ b/production/ksonnet/loki/gateway.libsonnet @@ -50,7 +50,7 @@ local k = import 'ksonnet-util/kausal.libsonnet'; listen 80; auth_basic “Prometheus”; auth_basic_user_file /etc/nginx/secrets/.htpasswd; - proxy_set_header X-Scope-OrgID %(gateway_tenant_id); + proxy_set_header X-Scope-OrgID %(gateway_tenant_id)s; location = /api/prom/push { proxy_pass http://distributor.%(namespace)s.svc.cluster.local:%(http_listen_port)s$request_uri; diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet index 00d1fc1b85ea..7d017804e2a8 100644 --- a/production/ksonnet/loki/loki.libsonnet +++ b/production/ksonnet/loki/loki.libsonnet @@ -27,6 +27,9 @@ // Index Gateway support (import 'index-gateway.libsonnet') + +// BoltDB Shipper support. Anything that modifies the compactor must be imported after this. +(import 'boltdb_shipper.libsonnet') + + // Multi-zone ingester related config (import 'multi-zone.libsonnet') + @@ -34,7 +37,4 @@ (import 'memberlist.libsonnet') + // Prometheus ServiceMonitor -(import 'servicemonitor.libsonnet') + - -// BoltDB Shipper support. This should be the last one to get imported. -(import 'boltdb_shipper.libsonnet') +(import 'servicemonitor.libsonnet') diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json index 083c52e97448..44c7dc4ac7e1 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json @@ -87,7 +87,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", "legendFormat": "{{status}}", "refId": "A" } @@ -183,7 +183,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", 
\"status_code\", \"([a-z]+)\"))", "legendFormat": "{{status}}", "refId": "A" } @@ -229,102 +229,6 @@ "alignLevel": null } }, - { - "aliasColors": { }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fieldConfig": { - "defaults": { - "custom": { } - }, - "overrides": [ ] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 5, - "w": 4, - "x": 8, - "y": 1 - }, - "hiddenSeries": false, - "id": 11, - "legend": { - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [ ] - }, - "panels": [ ], - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [ ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(5, sum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval])) - \nsum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval] offset 1h)))", - "legendFormat": "{{name}}-{{level}}", - "refId": "A" - } - ], - "thresholds": [ ], - "timeFrom": null, - "timeRegions": [ ], - "timeShift": null, - "title": "Bad Words", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, { "aliasColors": { }, "bars": false, @@ -662,17 +566,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": ".5", "refId": "C" } diff --git 
a/production/loki-mixin-compiled/dashboards/loki-operational.json b/production/loki-mixin-compiled/dashboards/loki-operational.json index 04c710b4535d..3fe1006ca775 100644 --- a/production/loki-mixin-compiled/dashboards/loki-operational.json +++ b/production/loki-mixin-compiled/dashboards/loki-operational.json @@ -87,7 +87,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/query-frontend\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)", "legendFormat": "{{status}}", "refId": "A" } @@ -183,7 +183,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", + "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))", "legendFormat": "{{status}}", "refId": "A" } @@ -662,17 +662,17 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"$namespace/cortex-gw(-internal)?\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", + "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3", "legendFormat": 
".5", "refId": "C" } diff --git a/production/loki-mixin/config.libsonnet b/production/loki-mixin/config.libsonnet index 5e5a84ddd8cc..e0b09677ea45 100644 --- a/production/loki-mixin/config.libsonnet +++ b/production/loki-mixin/config.libsonnet @@ -15,6 +15,11 @@ // Enable dashboard and panels for Grafana Labs internal components. internal_components: false, + promtail: { + // Whether or not to include promtail specific dashboards + enabled: true, + }, + // SSD related configuration for dashboards. ssd: { // Support Loki SSD mode on dashboards. diff --git a/production/loki-mixin/dashboards/loki-operational.libsonnet b/production/loki-mixin/dashboards/loki-operational.libsonnet index 126b8e912af4..e8f5d9824874 100644 --- a/production/loki-mixin/dashboards/loki-operational.libsonnet +++ b/production/loki-mixin/dashboards/loki-operational.libsonnet @@ -19,11 +19,16 @@ local utils = import 'mixin-utils/utils.libsonnet'; 'Ingester', ], + hiddenPanels:: if $._config.promtail.enabled then [] else [ + 'Bad Words', + ], + jobMatchers:: { cortexgateway: [utils.selector.re('job', '($namespace)/cortex-gw(-internal)?')], distributor: [utils.selector.re('job', '($namespace)/%s' % (if $._config.ssd.enabled then '%s-write' % $._config.ssd.pod_prefix_matcher else 'distributor'))], ingester: [utils.selector.re('job', '($namespace)/%s' % (if $._config.ssd.enabled then '%s-write' % $._config.ssd.pod_prefix_matcher else 'ingester.*'))], querier: [utils.selector.re('job', '($namespace)/%s' % (if $._config.ssd.enabled then '%s-read' % $._config.ssd.pod_prefix_matcher else 'querier'))], + queryFrontend: [utils.selector.re('job', '($namespace)/%s' % (if $._config.ssd.enabled then '%s-read' % $._config.ssd.pod_prefix_matcher else 'query-frontend'))], }, podMatchers:: { @@ -136,6 +141,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; std.rstripChars(matcherStr('querier'), ',') ), + local replaceAllMatchers(expr) = replaceMatchers(replaceClusterMatchers(expr)), @@ -147,12 +153,33 @@ local utils = import 'mixin-utils/utils.libsonnet'; local isRowHidden(row) = std.member(dashboards['loki-operational.json'].hiddenRows, row), + local isPanelHidden(panelTitle) = + std.member(dashboards['loki-operational.json'].hiddenPanels, panelTitle), + + local replaceCortexGateway(expr, replacement) = if $._config.internal_components then + expr + else + std.strReplace( + expr, + 'job=~"$namespace/cortex-gw(-internal)?"', + matcherStr(replacement, matcher='job', sep='') + ), + + local removeInternalComponents(title, expr) = if (title == 'Queries/Second') then + replaceCortexGateway(expr, 'queryFrontend') + else if (title == 'Pushes/Second') then + replaceCortexGateway(expr, 'distributor') + else if (title == 'Push Latency') then + replaceCortexGateway(expr, 'distributor') + else + replaceAllMatchers(expr), + panels: [ p { datasource: selectDatasource(super.datasource), targets: if std.objectHas(p, 'targets') then [ e { - expr: replaceAllMatchers(e.expr), + expr: removeInternalComponents(p.title, e.expr), } for e in p.targets ] else [], @@ -161,7 +188,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; datasource: selectDatasource(super.datasource), targets: if std.objectHas(sp, 'targets') then [ e { - expr: replaceAllMatchers(e.expr), + expr: removeInternalComponents(p.title, e.expr), } for e in sp.targets ] else [], @@ -170,15 +197,17 @@ local utils = import 'mixin-utils/utils.libsonnet'; datasource: selectDatasource(super.datasource), targets: if std.objectHas(ssp, 'targets') then [ e { - expr: replaceAllMatchers(e.expr), 
+ expr: removeInternalComponents(p.title, e.expr), } for e in ssp.targets ] else [], } for ssp in sp.panels + if !(isPanelHidden(ssp.title)) ] else [], } for sp in p.panels + if !(isPanelHidden(sp.title)) ] else [], title: if !($._config.ssd.enabled && p.type == 'row') then p.title else if p.title == 'Distributor' then 'Write Path' @@ -186,7 +215,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; else p.title, } for p in super.panels - if !(p.type == 'row' && isRowHidden(p.title)) + if !(p.type == 'row' && isRowHidden(p.title)) && !(isPanelHidden(p.title)) ], } + $.dashboard('Loki / Operational', uid='operational') diff --git a/production/loki-mixin/mixin-ssd.libsonnet b/production/loki-mixin/mixin-ssd.libsonnet index 1dcd2d161ebc..01c59bb6ab7c 100644 --- a/production/loki-mixin/mixin-ssd.libsonnet +++ b/production/loki-mixin/mixin-ssd.libsonnet @@ -4,6 +4,13 @@ grafanaDashboardFolder: 'Loki SSD', _config+:: { + internal_components: false, + + // By default the helm chart uses the Grafana Agent instead of promtail + promtail+: { + enabled: false, + }, + ssd+: { enabled: true, }, diff --git a/production/nomad/loki-distributed/job.nomad.hcl b/production/nomad/loki-distributed/job.nomad.hcl index add465ed07f0..4bda0ee146cd 100644 --- a/production/nomad/loki-distributed/job.nomad.hcl +++ b/production/nomad/loki-distributed/job.nomad.hcl @@ -128,6 +128,7 @@ job "loki" { interval = "20s" timeout = "1s" } + } task "ruler" { driver = "docker" @@ -503,6 +504,7 @@ job "loki" { interval = "20s" timeout = "1s" } + } task "query-frontend" { driver = "docker" diff --git a/tools/dev/k3d/Makefile b/tools/dev/k3d/Makefile index c944f426e667..bf2a9eace5c5 100644 --- a/tools/dev/k3d/Makefile +++ b/tools/dev/k3d/Makefile @@ -6,44 +6,50 @@ IMAGE_TAG := $(shell ../../../tools/image-tag) EXISTING_REGISTRY_PORT := $(shell k3d registry list -o json | jq -r '.[] | select(.name == "k3d-grafana") | .portMappings."5000/tcp" | .[0].HostPort') REGISTRY_PORT ?= $(or $(EXISTING_REGISTRY_PORT),46453) +# Time in seconds to wait for completion of cluster operation, run 'make SLEEP=10 ' +SLEEP := 5 +# For verbose output, run 'make DEBUG=true ' +DEBUG := false +HELM := helm --debug=$(DEBUG) + enterprise-logs: prepare-gel helm-cluster $(MAKE) -C $(CURDIR) apply-enterprise-helm-cluster - echo "Waiting 5s for cluster to be ready for helm installation." + echo "Waiting $(SLEEP)s for cluster to be ready for helm installation." echo "The helm install will take a while. It's useful to monitor progress using a tool like k9s." - # wait 5s for tk apply to finish and cluster is ready for helm install - sleep 5 + # wait for tk apply to finish and cluster is ready for helm install + sleep $(SLEEP) $(MAKE) -C $(CURDIR) helm-install-enterprise-logs echo "Helm installation finished. You can tear down this cluster with make down." enterprise-logs-ha-single-binary: prepare-gel helm-cluster $(MAKE) -C $(CURDIR) apply-enterprise-helm-cluster - echo "Waiting 5s for cluster to be ready for helm installation." + echo "Waiting $(SLEEP)s for cluster to be ready for helm installation." echo "The helm install will take a while. It's useful to monitor progress using a tool like k9s." - # wait 5s for tk apply to finish and cluster is ready for helm install - sleep 5 + # wait for tk apply to finish and cluster is ready for helm install + sleep $(SLEEP) $(MAKE) -C $(CURDIR) helm-install-enterprise-logs-ha-single-binary echo "Helm installation finished. You can tear down this cluster with make down." 
loki: prepare helm-cluster $(MAKE) -C $(CURDIR) apply-loki-helm-cluster - echo "Waiting 5s for cluster to be ready for helm installation." - # wait 5s for tk apply to finish and cluster is ready for helm install - sleep 5 + echo "Waiting $(SLEEP)s for cluster to be ready for helm installation." + # wait for tk apply to finish and cluster is ready for helm install + sleep $(SLEEP) $(MAKE) -C $(CURDIR) helm-install-loki echo "Helm installation finished. You can tear down this cluster with make down." loki-ha-single-binary: prepare helm-cluster $(MAKE) -C $(CURDIR) apply-loki-helm-cluster - echo "Waiting 5s for cluster to be ready for helm installation." - # wait 5s for tk apply to finish and cluster is ready for helm install - sleep 5 + echo "Waiting $(SLEEP)s for cluster to be ready for helm installation." + # wait for tk apply to finish and cluster is ready for helm install + sleep $(SLEEP) $(MAKE) -C $(CURDIR) helm-install-loki-ha-single-binary echo "Helm installation finished. You can tear down this cluster with make down." helm-cluster: prepare $(CURDIR)/scripts/create_cluster.sh helm-cluster $(REGISTRY_PORT) - # wait 5s for the cluster to be ready - sleep 5 + # wait for the cluster to be ready + sleep $(SLEEP) apply-enterprise-helm-cluster: tk apply --ext-code enterprise=true environments/helm-cluster @@ -51,16 +57,19 @@ apply-enterprise-helm-cluster: apply-loki-helm-cluster: tk apply --ext-code enterprise=false environments/helm-cluster +apply-empty-helm-cluster: + tk apply --ext-code enterprise=false environments/helm-cluster/empty.jsonnet + down: k3d cluster delete helm-cluster add-repos: - helm repo add --force-update prometheus-community https://prometheus-community.github.io/helm-charts - helm repo add --force-update grafana https://grafana.github.io/helm-charts - helm repo add --force-update minio https://charts.min.io/ + $(HELM) repo add --force-update prometheus-community https://prometheus-community.github.io/helm-charts + $(HELM) repo add --force-update grafana https://grafana.github.io/helm-charts + $(HELM) repo add --force-update minio https://charts.min.io/ update-repos: add-repos - helm repo update + $(HELM) repo update tk tool charts vendor jb update @@ -111,37 +120,46 @@ build-latest-image: HELM_DIR := $(shell cd $(CURDIR)/../../../production/helm/loki && pwd) helm-install-enterprise-logs: - helm install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs.yaml" + $(HELM) install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs.yaml" helm-upgrade-enterprise-logs: - helm upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs.yaml" + $(HELM) upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs.yaml" helm-uninstall-enterprise-logs: - helm uninstall enterprise-logs-test-fixture -n loki + $(HELM) uninstall enterprise-logs-test-fixture -n loki helm-install-enterprise-logs-ha-single-binary: - helm install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-ha-single-binary.yaml" + $(HELM) install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-ha-single-binary.yaml" 
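When iterating on the chart itself, the individual targets above can also be run step by step instead of using the all-in-one `loki`/`enterprise-logs` targets; a rough sketch using only targets defined in this Makefile:

```bash
make helm-cluster                    # create the k3d cluster and apply the tanka environment
make helm-install-enterprise-logs   # first install of the chart from production/helm/loki
# ...edit the chart, then roll the change out and finally tear everything down:
make helm-upgrade-enterprise-logs
make down
```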
helm-upgrade-enterprise-logs-ha-single-binary:
-	helm upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-ha-single-binary.yaml"
+	$(HELM) upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-ha-single-binary.yaml"

helm-uninstall-enterprise-logs-ha-single-binary:
-	helm uninstall enterprise-logs-test-fixture -n loki
+	$(HELM) uninstall enterprise-logs-test-fixture -n loki

helm-install-loki:
-	helm install loki "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/loki.yaml"
+	$(HELM) install loki "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/loki.yaml"

helm-upgrade-loki:
-	helm upgrade loki "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/loki.yaml"
+	$(HELM) upgrade loki "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/loki.yaml"

helm-uninstall-loki:
-	helm uninstall loki -n loki
+	$(HELM) uninstall loki -n loki

helm-install-loki-ha-single-binary:
-	helm install loki-single-binary "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/loki-ha-single-binary.yaml"
+	$(HELM) install loki-single-binary "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/loki-ha-single-binary.yaml"

helm-upgrade-loki-ha-single-binary:
-	helm upgrade loki-single-binary "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/loki-ha-single-binary.yaml"
+	$(HELM) upgrade loki-single-binary "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/loki-ha-single-binary.yaml"

helm-uninstall-loki-binary:
-	helm uninstall loki-single-binary -n loki
+	$(HELM) uninstall loki-single-binary -n loki
+
+helm-install-kube-state-metrics:
+	helm install kube-state-metrics prometheus-community/kube-state-metrics --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/kube-state-metrics.yaml"
+
+helm-install-enterprise-logs-cloud-monitoring:
+	helm install enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --create-namespace --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml"
+
+helm-upgrade-enterprise-logs-cloud-monitoring:
+	helm upgrade enterprise-logs-test-fixture "$(HELM_DIR)" -n loki --values "$(CURDIR)/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml"
diff --git a/tools/dev/k3d/README.md b/tools/dev/k3d/README.md
index ba8803974df6..4e9a28fe9230 100644
--- a/tools/dev/k3d/README.md
+++ b/tools/dev/k3d/README.md
@@ -42,4 +42,10 @@ The `helm-cluster` environment is designed for spinning up a cluster with just G
 1. `make helm-cluster`
 1. `make helm-install-enterprise-logs`
- This step will take a while. The `provisioner` is dependent on the `tokengen` job and for the Admin API to be in a healthy state. Be patient and it will evenutally complete. Once the `provisioner` job has completed, the Loki Canaries will come online, and Grafana will start, as both are waiting on secrets provisioned by the `provisioner`.
+The installation step executes a set of jobs required for the enterprise-logs deployment:
+1) The `tokengen` job generates an admin-api token, stores it in the object storage and creates a Kubernetes secret.
+1) The `provisioner` job depends on the `tokengen` job to create the Kubernetes secret and on the Admin API to be in a healthy state.
+The tokengen Kubernetes secret will be used to create the input admin resources via the Admin API. +Afterwards, a new Kubernetes secret for each newly generated token is created. +1) Both the Loki Canaries and Grafana depend on the secrets provisioned by the `provisioner`. +Therefore, once the `provisioner` job is completed, these components will become online. diff --git a/tools/dev/k3d/environments/helm-cluster/empty.jsonnet b/tools/dev/k3d/environments/helm-cluster/empty.jsonnet new file mode 100644 index 000000000000..48532f0edb21 --- /dev/null +++ b/tools/dev/k3d/environments/helm-cluster/empty.jsonnet @@ -0,0 +1,34 @@ +local k = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libsonnet'; +local tanka = import 'github.com/grafana/jsonnet-libs/tanka-util/main.libsonnet'; +local configMap = k.core.v1.configMap; + +local spec = (import './spec.json').spec; + +{ + _config+:: { + namespace: spec.namespace, + }, + + lokiNamespace: k.core.v1.namespace.new('loki'), + + gelLicenseSecret: k.core.v1.secret.new('gel-license', {}, type='Opaque') + + k.core.v1.secret.withStringData({ + 'license.jwt': importstr '../../secrets/gel.jwt', + }) + + k.core.v1.secret.metadata.withNamespace('loki'), + local grafanaCloudCredentials = import '../../secrets/grafana-cloud-credentials.json', + grafanaCloudMetricsCredentials: k.core.v1.secret.new('grafana-cloud-metrics-credentials', {}, type='Opaque') + + k.core.v1.secret.withStringData({ + username: '%d' % grafanaCloudCredentials.metrics.username, + password: grafanaCloudCredentials.metrics.password, + }) + + k.core.v1.secret.metadata.withNamespace('loki'), + grafanaCloudLogsCredentials: k.core.v1.secret.new('grafana-cloud-logs-credentials', {}, type='Opaque') + + k.core.v1.secret.withStringData({ + username: '%d' % grafanaCloudCredentials.logs.username, + password: grafanaCloudCredentials.logs.password, + }) + + k.core.v1.secret.metadata.withNamespace('loki'), + + +} diff --git a/tools/dev/k3d/environments/helm-cluster/spec.json b/tools/dev/k3d/environments/helm-cluster/spec.json index de3524e8f0f3..ebfcf5405fd9 100644 --- a/tools/dev/k3d/environments/helm-cluster/spec.json +++ b/tools/dev/k3d/environments/helm-cluster/spec.json @@ -6,7 +6,7 @@ "namespace": "environments/helm-cluster/main.jsonnet" }, "spec": { - "apiServer": "https://0.0.0.0:38539", + "apiServer": "https://0.0.0.0:33931", "namespace": "k3d-helm-cluster", "resourceDefaults": {}, "expectVersions": {} diff --git a/tools/dev/k3d/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml b/tools/dev/k3d/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml new file mode 100644 index 000000000000..1ee3c8de21cc --- /dev/null +++ b/tools/dev/k3d/environments/helm-cluster/values/enterprise-logs-cloud-monitoring.yaml @@ -0,0 +1,43 @@ +--- +loki: + querier: + multi_tenant_queries_enabled: true +enterprise: + enabled: true + adminToken: + secret: "gel-admin-token" + useExternalLicense: true + externalLicenseName: gel-license + provisioner: + provisionedSecretPrefix: "provisioned-secret" +monitoring: + dashboards: + enabled: false + rules: + enabled: false + selfMonitoring: + tenant: + name: loki + logsInstance: + clients: + - url: https://logs-prod-us-central1.grafana.net/loki/api/v1/push + basicAuth: + username: + name: grafana-cloud-logs-credentials + key: username + password: + name: grafana-cloud-logs-credentials + key: password + serviceMonitor: + metricsInstance: + remoteWrite: + - url: https://prometheus-blocks-prod-us-central1.grafana.net/api/prom/push + 
basicAuth: + username: + name: grafana-cloud-metrics-credentials + key: username + password: + name: grafana-cloud-metrics-credentials + key: password +minio: + enabled: true diff --git a/tools/dev/k3d/environments/helm-cluster/values/kube-state-metrics.yaml b/tools/dev/k3d/environments/helm-cluster/values/kube-state-metrics.yaml new file mode 100644 index 000000000000..ed97d539c095 --- /dev/null +++ b/tools/dev/k3d/environments/helm-cluster/values/kube-state-metrics.yaml @@ -0,0 +1 @@ +--- diff --git a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile index d1b073b3e3ed..8c1fc1a95ed5 100644 --- a/tools/dev/loki-boltdb-storage-s3/dev.dockerfile +++ b/tools/dev/loki-boltdb-storage-s3/dev.dockerfile @@ -1,8 +1,8 @@ -FROM golang:1.18.5 +FROM golang:1.20.1 ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.9.0 -FROM alpine:3.16.2 +FROM alpine:3.16.4 RUN mkdir /loki WORKDIR /loki diff --git a/tools/doc-generator/parse/root_blocks.go b/tools/doc-generator/parse/root_blocks.go index 7d656b4b23bb..1c3e2b269ef0 100644 --- a/tools/doc-generator/parse/root_blocks.go +++ b/tools/doc-generator/parse/root_blocks.go @@ -24,6 +24,7 @@ import ( "github.com/grafana/loki/pkg/scheduler" "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/chunk/cache" + "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" "github.com/grafana/loki/pkg/storage/chunk/client/baidubce" @@ -200,6 +201,11 @@ var ( StructType: reflect.TypeOf(azure.BlobStorageConfig{}), Desc: "The azure_storage_config block configures the connection to Azure object storage backend.", }, + { + Name: "alibabacloud_storage_config", + StructType: reflect.TypeOf(alibaba.OssConfig{}), + Desc: "The alibabacloud_storage_config block configures the connection to Alibaba Cloud Storage object storage backend.", + }, { Name: "gcs_storage_config", StructType: reflect.TypeOf(gcp.GCSConfig{}), diff --git a/tools/lambda-promtail/Dockerfile b/tools/lambda-promtail/Dockerfile index 0b3e0a141093..f0bc66ad460f 100644 --- a/tools/lambda-promtail/Dockerfile +++ b/tools/lambda-promtail/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.5-alpine AS build-image +FROM golang:1.20.1-alpine AS build-image COPY tools/lambda-promtail /src/lambda-promtail WORKDIR /src/lambda-promtail @@ -12,7 +12,7 @@ RUN go mod download RUN go build -o ./main -tags lambda.norpc -ldflags="-s -w" lambda-promtail/*.go -FROM alpine:3.16.2 +FROM alpine:3.16.4 WORKDIR /app diff --git a/tools/lambda-promtail/Makefile b/tools/lambda-promtail/Makefile index 884115aa7f07..202615f5b641 100644 --- a/tools/lambda-promtail/Makefile +++ b/tools/lambda-promtail/Makefile @@ -1,7 +1,12 @@ -all: build +all: clean test build + +GOTEST ?= go test build: GOOS=linux CGO_ENABLED=0 go build -o ./main lambda-promtail/*.go +test: + $(GOTEST) ./... 
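The lambda-promtail Makefile change above wires tests into the default target and makes the test runner overridable through `GOTEST`; a minimal usage sketch, run from `tools/lambda-promtail/` and assuming only the targets and variables shown in this diff:

```bash
# Run the new test target with the default runner (go test ./...):
make test

# Swap in a different runner, e.g. with the race detector and verbose output:
make GOTEST="go test -race -v" test

# 'make' (the 'all' target) now runs clean, test and build instead of build alone:
make
```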
+ clean: - rm main + rm -f main diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index c23c6a5a36a1..3163382f7df9 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -1,113 +1,119 @@ module main -go 1.17 +go 1.19 require ( github.com/aws/aws-lambda-go v1.26.0 - github.com/aws/aws-sdk-go-v2 v1.11.2 - github.com/aws/aws-sdk-go-v2/config v1.11.1 + github.com/aws/aws-sdk-go-v2 v1.16.0 + github.com/aws/aws-sdk-go-v2/config v1.15.1 github.com/aws/aws-sdk-go-v2/service/s3 v1.22.0 + github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 - github.com/grafana/dskit v0.0.0-20220105080720-01ce9286d7d5 - github.com/grafana/loki v1.6.2-0.20220128102010-431d018ec64f - github.com/prometheus/common v0.32.1 + github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 + github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c + github.com/prometheus/common v0.39.0 + github.com/stretchr/testify v1.8.1 ) require ( github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect - github.com/armon/go-metrics v0.3.9 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/armon/go-metrics v0.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.6.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.12.0 // indirect - github.com/aws/smithy-go v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 // indirect + github.com/aws/smithy-go v1.11.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/coreos/etcd v3.3.25+incompatible // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/coreos/go-systemd/v22 v22.4.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dennwc/varint v1.0.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/go-kit/log v0.2.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/fatih/color v1.14.1 // indirect + 
github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect - github.com/gogo/status v1.1.0 // indirect + github.com/gogo/status v1.1.1 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/gorilla/mux v1.8.0 // indirect + github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/hashicorp/consul/api v1.12.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/consul/api v1.18.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v0.16.2 // indirect + github.com/hashicorp/go-hclog v1.2.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/memberlist v0.3.0 // indirect - github.com/hashicorp/serf v0.9.6 // indirect + github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/memberlist v0.5.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mattn/go-colorable v0.1.9 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/miekg/dns v1.1.45 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.50 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/exporter-toolkit v0.8.2 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/prometheus v0.41.0 // indirect 
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver v2.4.0+incompatible // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/stretchr/objx v0.2.0 // indirect - github.com/stretchr/testify v1.7.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e // indirect + github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d // indirect github.com/weaveworks/promrus v1.2.0 // indirect - go.etcd.io/etcd v3.3.25+incompatible // indirect - go.etcd.io/etcd/api/v3 v3.5.0 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect - go.etcd.io/etcd/client/v3 v3.5.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.5.1 // indirect - golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 // indirect - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect - golang.org/x/tools v0.1.8 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect - google.golang.org/grpc v1.40.1 // indirect - google.golang.org/protobuf v1.27.1 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + go.etcd.io/etcd/api/v3 v3.5.4 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect + go.etcd.io/etcd/client/v3 v3.5.4 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.2.0 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect + golang.org/x/mod v0.7.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.4.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.52.3 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) replace k8s.io/client-go => k8s.io/client-go v0.21.0 diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index d307953b81e3..c1d9c7e7e80e 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -1,14 +1,11 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go 
v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -28,16 +25,24 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= -cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -47,380 +52,113 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v58.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= -github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= +github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-lambda-go v1.26.0 h1:6ujqBpYF7tdZcBvPIccs98SpeGfrt/UOVEiexfNIdHA= github.com/aws/aws-lambda-go v1.26.0/go.mod h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= -github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.42.8/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.11.2 h1:SDiCYqxdIYi6HgQfAWRhgdZrdnOuGyLDJVRSWLeHWvs= +github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA= github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= +github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= +github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= -github.com/aws/aws-sdk-go-v2/config v1.11.1 h1:KXSjb7ZMLRtjxClFptukTYibiOqJS9NwBO+9WD3UMto= -github.com/aws/aws-sdk-go-v2/config v1.11.1/go.mod h1:VvfkzUhVtntSg1JfGFMSKS0CyiTZd3NqBxK5af4zsME= -github.com/aws/aws-sdk-go-v2/credentials v1.6.5 h1:ZrsO2js2v4T95rsCIWoAb/ck5+U1kwkizGdZHY+ni3s= -github.com/aws/aws-sdk-go-v2/credentials v1.6.5/go.mod h1:HWSOnsnqVMbLcWUmom6AN1cqhcLzLJ62AObW28CbYbU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.2 h1:KiN5TPOLrEjbGCvdTQR4t0U4T87vVwALZ5Bg3jpMqPY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.2/go.mod h1:dF2F6tXEOgmW5X1ZFO/EPtWrcm7XkW07KNcJUGNtt4s= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.2 h1:XJLnluKuUxQG255zPNe+04izXl7GSyUVafIsgfv9aw4= +github.com/aws/aws-sdk-go-v2/config v1.15.1 h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY= +github.com/aws/aws-sdk-go-v2/config v1.15.1/go.mod h1:MZHGbuW2WnqIOQQBKu2ZkhTjuutZSTnn56TDq4QyydE= +github.com/aws/aws-sdk-go-v2/credentials v1.11.0 h1:gc4Uhs80s60nmLon5Z4JXWinX2BkAGT0YROoUT8h8U4= +github.com/aws/aws-sdk-go-v2/credentials v1.11.0/go.mod h1:EdV1ZFgtZ4XM5RDHWcRWK8H+xW5duNVBqWj2oLu7tRo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1 h1:F9Je1nq5YXfMOv6451NHvMf6U0iTWeMnsG0MMIQoUmk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1/go.mod h1:Yph0XsTbQ5GGZ2+mO1a03P/SO9fdX3t1nejIp2tq79g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.2/go.mod h1:SgKKNBIoDC/E1ZCDhhMW3yalWjwuLjMcpLzsM/QQnWo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.2 h1:EauRoYZVNPlidZSZJDscjJBQ22JhVF2+tdteatax2Ak= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7 h1:KUErSJgdqmqAPBWAp6Zx9CjL0YXfytXJeXcsWnuCM1c= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7/go.mod h1:oB9nZcxH1cGq7NPGurVJwxrO2vmJ9mmEBayCwcAlmT8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.2/go.mod h1:xT4XX6w5Sa3dhg50JrYyy3e4WPYo/+WjY/BXtqXVunU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.2 h1:IQup8Q6lorXeiA/rK72PeToWoWK8h7VAPgHNWdSrtgE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.2/go.mod h1:VITe/MdW6EMXPb0o0txu/fsonXbMHUU2OC2Qp7ivU4o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1 h1:feVfa9eJonhJiss7g51ikjNB2DrUzbNZNvPL8pw/54k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1/go.mod h1:K4vz7lRYCyLYpYAMCLObODahFgARdD3YVa0MvQte9Co= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 h1:adr3PfiggFtqgFofAMUFCtdvwzpf3QxPES4ezK4M3iI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8/go.mod h1:wLbQYt36AJqaRZUQiCNXzbtkNigyPfKHrotHuIDiCy8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.2 h1:CKdUNKmuilw/KNmO2Q53Av8u+ZyXMC2M9aX8Z+c/gzg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.2/go.mod h1:FgR1tCsn8C6+Hf+N5qkfrE4IXvUL1RgW87sunJ+5J4I= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1 h1:B/SPX7J+Y0Yrcjv60Nhbh1gC2uBN47SfN8JYre6Mp4M= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1/go.mod h1:2Hhr9Eh1gJzDatwACX/ozAZ/ljq5vzvPRu5cdu25tzc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.2 
h1:GnPGH1FGc4fkn0Jbm/8r2+nPOwSJjYPyHSqFSvY1ii8= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.2/go.mod h1:eDUYjOYt4Uio7xfHi5jOsO393ZG8TSfZB92a3ZNadWM= github.com/aws/aws-sdk-go-v2/service/s3 v1.22.0 h1:J78RE/YNohCGbUyIbc3hr+UwnttfOn2dJUkNfvDkT30= github.com/aws/aws-sdk-go-v2/service/s3 v1.22.0/go.mod h1:lQ5AeEW2XWzu8hwQ3dCqZFWORQ3RntO0Kq135Xd9VCo= -github.com/aws/aws-sdk-go-v2/service/sso v1.7.0 h1:E4fxAg/UE8a6yiLZYv8/EP0uXKPPRImiMau4ift6S/g= -github.com/aws/aws-sdk-go-v2/service/sso v1.7.0/go.mod h1:KnIpszaIdwI33tmc/W/GGXyn22c1USYxA/2KyvoeDY0= -github.com/aws/aws-sdk-go-v2/service/sts v1.12.0 h1:7g0252k2TF3eA1DtfkTQB/tqI41YvbUPaolwTR0/ITc= -github.com/aws/aws-sdk-go-v2/service/sts v1.12.0/go.mod h1:UV2N5HaPfdbDpkgkz4sRzWCvQswZjdO1FfqCWl0t7RA= -github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.1 h1:DyHctRsJIAWIvom1Itb4T84D2jwpIu+KIi3d0SFaswg= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.1/go.mod h1:CvFTucADIx7U/M44vjLs/ZttpQHdpxwK+62+dUGhDeY= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.1 h1:xsOtPAvHqhvQvBza5ohaUcfq1LceH2lZKMUGZJKiZiM= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w= github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g= +github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= -github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod 
h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt 
v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.25+incompatible h1:0GQEw6h3YnuOVdtwygkIfJ+Omx0tZ8/QkVyXI4LkbeY= -github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod 
h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.71.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy 
v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -428,216 +166,50 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod 
h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod 
h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= 
-github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0/go.mod 
h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= -github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod 
h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf 
v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= -github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= -github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= +github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -650,7 +222,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -669,18 +240,13 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -692,11 +258,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -706,7 +272,6 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -716,84 +281,59 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.23.0/go.mod h1:MRw6uyLj8uCGbIvBlqL7QW67t0QtNZnzydUzewo1Ioc= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grafana/dskit v0.0.0-20220105080720-01ce9286d7d5 h1:IXo/V2+KKLYLD724qh3uRaZgAy3BV3HdtXuSs7lb3jU= -github.com/grafana/dskit v0.0.0-20220105080720-01ce9286d7d5/go.mod h1:M0/dlftwBvH7+hdNNpjMa/CUXD7gsew67mbkCuDlFXE= -github.com/grafana/loki v1.6.2-0.20220128102010-431d018ec64f h1:hh0L7UP8OHhNU4VPplTdrinL/CbiXaYrckNoclEg8Pc= -github.com/grafana/loki v1.6.2-0.20220128102010-431d018ec64f/go.mod h1:flXuzTvTn3/KEYb5hgMgqXpU2goZ26GtAaEKxhgZwUc= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2 h1:IOks+FXJ6iO/pfbaVEf4efNw+YzYBYNCkCabyrbkFTM= +github.com/grafana/dskit v0.0.0-20230201083518-528d8a7d52f2/go.mod h1:zj+5BNZAVmQafV583uLTAOzRr963KPdEm4d6NPmtbwg= +github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c h1:4JjETlwJs5VJgM5iLdcwksrZSBkwfqGT94kj8e3Y3tM= +github.com/grafana/loki v1.6.2-0.20230216091802-4e4359e67c6c/go.mod h1:EmSFg/t1wTEcN9MU1nVq2R7y25pVcqOA+Qv7luldlIo= +github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765 h1:VXitROTlmZtLzvokNe8ZbUKpmwldM4Hy1zdNRO32jKU= +github.com/grafana/loki/pkg/push v0.0.0-20230127102416-571f88bc5765/go.mod h1:DhJMrd2QInI/1CNtTN43BZuTmkccdizW1jZ+F6aHkhY= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.4.0/go.mod 
h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= +github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= +github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= +github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 
h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -802,83 +342,30 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= 
-github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= -github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= -github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= -github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= -github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo= -github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= -github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= -github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= -github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= -github.com/influxdata/tdigest 
v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= -github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= -github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -886,138 +373,55 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= -github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= 
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= -github.com/lib/pq 
v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= -github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= -github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= -github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= -github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= -github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= -github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.45 h1:g5fRIhm9nx7g8osrAvgb16QJfmyMsyOCb+J7LSv+Qzk= -github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod 
h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1025,362 +429,124 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod 
h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= 
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= -github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod 
h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= -github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= 
+github.com/prometheus/exporter-toolkit v0.8.2 h1:sbJAfBXQFkG6sUkbwBun8MNdzW9+wd5YfPYofbmj0YM= +github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= -github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7 h1:8rwRA5BKEAWtawaP4ozroDm3xMDrp9POAoT3HkQ3ZHw= -github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7/go.mod h1:outfylaI89+D5IO87TRPRmxfucIobTO3Rb0l2TKqpj0= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/prometheus v0.41.0 h1:+QR4QpzwE54zsKk2K7EUkof3tHxa3b/fyw7xJ4jR1Ns= +github.com/prometheus/prometheus v0.41.0/go.mod h1:Uu5817xm7ibU/VaDZ9pu1ssGzcpO9Bd+LyoZ76RpHyo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday/v2 
v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey 
v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= -github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/weaveworks/common v0.0.0-20210913144402-035033b78a78/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8= -github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e h1:B0gVGyVpjfWJWSRe027EkhmEype0a0Dt2uHVxcPrhfs= -github.com/weaveworks/common v0.0.0-20211015155308-ebe5bdc2c89e/go.mod h1:GWX2dQ7yjrgvqH0+d3kCJC5bsY8oOFwqjxFMHaRK4/k= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d h1:9Z/HiqeGN+LOnmotAMpFEQjuXZ4AGAVFG0rC1laP5Go= +github.com/weaveworks/common v0.0.0-20221201103051-7c2720a9024d/go.mod h1:Fnq3+U51tMkPRMC6Wr7zKGUeFFYX4YjNrNK50iU0fcE= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= -github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
-github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd v3.3.25+incompatible h1:V1RzkZJj9LqsJRy+TUBgpWSbZXITLB819lstuTFoZOY= -go.etcd.io/etcd v3.3.25+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= -go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mozilla.org/pkcs7 
v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1389,66 +555,32 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr 
v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1462,6 +594,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4= +golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1476,6 +610,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1488,42 +623,24 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1531,39 +648,37 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 h1:+6WJMRLHlD7X7frgp7TUZ36RnQzSf9wVVTNakEp+nqY= -golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1579,122 +694,81 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1703,18 +777,27 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1723,84 +806,58 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1817,23 +874,19 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors 
v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1863,36 +916,38 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1901,7 +956,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ 
-1913,9 +967,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1923,6 +975,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1940,27 +993,40 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb h1:ZrsicilzPCS/Xr8qtBZZLpy4P9TYXAfl49ctG1/5tgw= -google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= 
-google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1977,8 +1043,14 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1 h1:pnP7OclFFFgFi4VHQDQDaoXUVauOFyktqTsqqgzFKbc= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1992,53 +1064,31 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2046,56 +1096,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= -k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= -k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= 
-k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= \ No newline at end of file diff --git a/tools/lambda-promtail/lambda-promtail/cw.go b/tools/lambda-promtail/lambda-promtail/cw.go index 7632a7e1a329..8dba341b7370 100644 --- a/tools/lambda-promtail/lambda-promtail/cw.go +++ b/tools/lambda-promtail/lambda-promtail/cw.go @@ -13,7 +13,6 @@ import ( func parseCWEvent(ctx context.Context, b *batch, ev *events.CloudwatchLogsEvent) error { data, err := ev.AWSLogs.Parse() if err != nil { - fmt.Println("error parsing log event: ", err) return err } @@ -42,18 +41,18 @@ func parseCWEvent(ctx context.Context, b *batch, ev *events.CloudwatchLogsEvent) return nil } -func processCWEvent(ctx context.Context, ev *events.CloudwatchLogsEvent) error { - batch, err := newBatch(ctx) +func processCWEvent(ctx context.Context, ev *events.CloudwatchLogsEvent, pClient Client) error { + batch, err := newBatch(ctx, pClient) if err != nil { return err } err = parseCWEvent(ctx, batch, ev) if err != nil { - return err + return fmt.Errorf("error parsing log event: %s", err) } - err = sendToPromtail(ctx, batch) + err = pClient.sendToPromtail(ctx, batch) if err != nil { return err } diff --git a/tools/lambda-promtail/lambda-promtail/kinesis.go b/tools/lambda-promtail/lambda-promtail/kinesis.go index e7ba69e5caab..9b4b3fa31749 100644 --- a/tools/lambda-promtail/lambda-promtail/kinesis.go +++ b/tools/lambda-promtail/lambda-promtail/kinesis.go @@ -33,17 +33,17 @@ func parseKinesisEvent(ctx context.Context, b batchIf, ev *events.KinesisEvent) return nil } -func processKinesisEvent(ctx context.Context, ev *events.KinesisEvent) error { - batch, _ := newBatch(ctx) +func processKinesisEvent(ctx context.Context, ev *events.KinesisEvent, pClient Client) error { + batch, _ := newBatch(ctx, pClient) 
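The cw.go and kinesis.go hunks here change `processCWEvent` and `processKinesisEvent` to accept a `pClient Client` argument and push through `pClient.sendToPromtail` instead of a package-level function; the `Client` interface itself is introduced later in this diff in promtail_client.go. A minimal, self-contained sketch of why that injection helps testing is below; `logBatch`, `fakeClient`, and `processEvent` are hypothetical stand-ins for the PR's `batch`, promtail client, and event handlers, not code from this repository.

```go
package main

import (
	"context"
	"fmt"
)

// logBatch is a simplified stand-in for the PR's *batch type.
type logBatch struct {
	entries []string
}

// client mirrors the narrow interface the handlers now depend on; tests can
// inject a fake instead of a real Promtail push client.
type client interface {
	sendToPromtail(ctx context.Context, b *logBatch) error
}

// fakeClient records what would have been pushed so tests can assert on it.
type fakeClient struct {
	sent []*logBatch
}

func (f *fakeClient) sendToPromtail(_ context.Context, b *logBatch) error {
	f.sent = append(f.sent, b)
	return nil
}

// processEvent has the same shape as the refactored handlers: build a batch,
// fill it from the event, and push it through the injected client.
func processEvent(ctx context.Context, lines []string, c client) error {
	b := &logBatch{entries: lines}
	return c.sendToPromtail(ctx, b)
}

func main() {
	fc := &fakeClient{}
	if err := processEvent(context.Background(), []string{"line one", "line two"}, fc); err != nil {
		panic(err)
	}
	fmt.Println("batches captured:", len(fc.sent))
}
```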
err := parseKinesisEvent(ctx, batch, ev) if err != nil { return err } - err = sendToPromtail(ctx, batch) + err = pClient.sendToPromtail(ctx, batch) if err != nil { return err } return nil -} +} \ No newline at end of file diff --git a/tools/lambda-promtail/lambda-promtail/logger.go b/tools/lambda-promtail/lambda-promtail/logger.go new file mode 100644 index 000000000000..69a366b45653 --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/logger.go @@ -0,0 +1,15 @@ +package main + +import ( + "os" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +func NewLogger(logLevel string) *log.Logger { + logger := log.NewLogfmtLogger(os.Stderr) + logger = level.NewFilter(logger, level.Allow(level.ParseDefault(logLevel, level.DebugValue()))) + logger = log.With(logger, "caller", log.DefaultCaller) + return &logger +} diff --git a/tools/lambda-promtail/lambda-promtail/main.go b/tools/lambda-promtail/lambda-promtail/main.go index 64c53879485a..42c387a2d635 100644 --- a/tools/lambda-promtail/lambda-promtail/main.go +++ b/tools/lambda-promtail/lambda-promtail/main.go @@ -10,6 +10,8 @@ import ( "strconv" "strings" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/prometheus/common/model" "github.com/aws/aws-lambda-go/events" @@ -23,7 +25,7 @@ const ( maxErrMsgLen = 1024 - invalidExtraLabelsError = "Invalid value for environment variable EXTRA_LABELS. Expected a comma seperated list with an even number of entries. " + invalidExtraLabelsError = "Invalid value for environment variable EXTRA_LABELS. Expected a comma separated list with an even number of entries. " ) var ( @@ -51,8 +53,9 @@ func setupArguments() { fmt.Println("write address: ", writeAddress.String()) + omitExtraLabelsPrefix := os.Getenv("OMIT_EXTRA_LABELS_PREFIX") extraLabelsRaw = os.Getenv("EXTRA_LABELS") - extraLabels, err = parseExtraLabels(extraLabelsRaw) + extraLabels, err = parseExtraLabels(extraLabelsRaw, strings.EqualFold(omitExtraLabelsPrefix, "true")) if err != nil { panic(err) } @@ -96,11 +99,14 @@ func setupArguments() { if strings.EqualFold(print, "false") { printLogLine = false } - s3Clients = make(map[string]*s3.Client) } -func parseExtraLabels(extraLabelsRaw string) (model.LabelSet, error) { +func parseExtraLabels(extraLabelsRaw string, omitPrefix bool) (model.LabelSet, error) { + prefix := "__extra_" + if omitPrefix { + prefix = "" + } var extractedLabels = model.LabelSet{} extraLabelsSplit := strings.Split(extraLabelsRaw, ",") @@ -112,7 +118,7 @@ func parseExtraLabels(extraLabelsRaw string) (model.LabelSet, error) { return nil, fmt.Errorf(invalidExtraLabelsError) } for i := 0; i < len(extraLabelsSplit); i += 2 { - extractedLabels[model.LabelName("__extra_"+extraLabelsSplit[i])] = model.LabelValue(extraLabelsSplit[i+1]) + extractedLabels[model.LabelName(prefix+extraLabelsSplit[i])] = model.LabelValue(extraLabelsSplit[i+1]) } err := extractedLabels.Validate() if err != nil { @@ -128,10 +134,12 @@ func applyExtraLabels(labels model.LabelSet) model.LabelSet { func checkEventType(ev map[string]interface{}) (interface{}, error) { var s3Event events.S3Event + var s3TestEvent events.S3TestEvent var cwEvent events.CloudwatchLogsEvent var kinesisEvent events.KinesisEvent + var sqsEvent events.SQSEvent - types := [...]interface{}{&s3Event, &cwEvent, &kinesisEvent} + types := [...]interface{}{&s3Event, &s3TestEvent, &cwEvent, &kinesisEvent, &sqsEvent} j, _ := json.Marshal(ev) reader := strings.NewReader(string(j)) @@ -152,21 +160,42 @@ func checkEventType(ev map[string]interface{}) 
(interface{}, error) { } func handler(ctx context.Context, ev map[string]interface{}) error { + lvl, ok := os.LookupEnv("LOG_LEVEL") + if !ok { + lvl = "info" + } + log := NewLogger(lvl) + pClient := NewPromtailClient(&promtailClientConfig{ + backoff: &backoff.Config{ + MinBackoff: minBackoff, + MaxBackoff: maxBackoff, + MaxRetries: maxRetries, + }, + http: &httpClientConfig{ + timeout: timeout, + skipTlsVerify: skipTlsVerify, + }, + }, log) + event, err := checkEventType(ev) if err != nil { - fmt.Printf("invalid event: %s\n", ev) + level.Error(*pClient.log).Log("err", fmt.Errorf("invalid event: %s\n", ev)) return err } switch evt := event.(type) { case *events.S3Event: - return processS3Event(ctx, evt) + return processS3Event(ctx, evt, pClient, pClient.log) case *events.CloudwatchLogsEvent: - return processCWEvent(ctx, evt) + return processCWEvent(ctx, evt, pClient) case *events.KinesisEvent: - return processKinesisEvent(ctx, evt) + return processKinesisEvent(ctx, evt, pClient) + case *events.SQSEvent: + return processSQSEvent(ctx, evt) + // When setting up S3 Notification on a bucket, a test event is first sent, see: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-content-structure.html + case *events.S3TestEvent: + return nil } - return err } diff --git a/tools/lambda-promtail/lambda-promtail/main_test.go b/tools/lambda-promtail/lambda-promtail/main_test.go index 30226ad4b836..fbd47571f341 100644 --- a/tools/lambda-promtail/lambda-promtail/main_test.go +++ b/tools/lambda-promtail/lambda-promtail/main_test.go @@ -1,13 +1,14 @@ package main import ( + "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" - "testing" ) func TestLambdaPromtail_ExtraLabelsValid(t *testing.T) { - extraLabels, err := parseExtraLabels("A1,a,B2,b,C3,c,D4,d") + extraLabels, err := parseExtraLabels("A1,a,B2,b,C3,c,D4,d", false) require.Nil(t, err) require.Len(t, extraLabels, 4) require.Equal(t, model.LabelValue("a"), extraLabels["__extra_A1"]) @@ -17,19 +18,19 @@ func TestLambdaPromtail_ExtraLabelsValid(t *testing.T) { } func TestLambdaPromtail_ExtraLabelsMissingValue(t *testing.T) { - extraLabels, err := parseExtraLabels("A,a,B,b,C,c,D") + extraLabels, err := parseExtraLabels("A,a,B,b,C,c,D",false) require.Nil(t, extraLabels) require.Errorf(t, err, invalidExtraLabelsError) } func TestLambdaPromtail_ExtraLabelsInvalidNames(t *testing.T) { - extraLabels, err := parseExtraLabels("A!,%a,B?,$b,C-,c^") + extraLabels, err := parseExtraLabels("A!,%a,B?,$b,C-,c^", false) require.Nil(t, extraLabels) require.Errorf(t, err, "invalid name \"__extra_A!\"") } func TestLambdaPromtail_TestParseLabelsNoneProvided(t *testing.T) { - extraLabels, err := parseExtraLabels("") + extraLabels, err := parseExtraLabels("", false) require.Len(t, extraLabels, 0) require.Nil(t, err) } diff --git a/tools/lambda-promtail/lambda-promtail/promtail.go b/tools/lambda-promtail/lambda-promtail/promtail.go index 56efaa434a65..46fc3f9f0f1e 100644 --- a/tools/lambda-promtail/lambda-promtail/promtail.go +++ b/tools/lambda-promtail/lambda-promtail/promtail.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "context" - "crypto/tls" "fmt" "io" "net/http" @@ -12,6 +11,7 @@ import ( "strings" "time" + "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/grafana/dskit/backoff" @@ -38,6 +38,7 @@ type entry struct { type batch struct { streams map[string]*logproto.Stream size int + client Client } type batchIf interface { @@ -47,9 +48,10 @@ type batchIf interface { 
flushBatch(ctx context.Context) error } -func newBatch(ctx context.Context, entries ...entry) (*batch, error) { +func newBatch(ctx context.Context, pClient Client, entries ...entry) (*batch, error) { b := &batch{ streams: map[string]*logproto.Stream{}, + client: pClient, } for _, entry := range entries { @@ -123,34 +125,37 @@ func (b *batch) createPushRequest() (*logproto.PushRequest, int) { } func (b *batch) flushBatch(ctx context.Context) error { - err := sendToPromtail(ctx, b) + err := b.client.sendToPromtail(ctx, b) if err != nil { return err } - - b.streams = make(map[string]*logproto.Stream) + b.resetBatch() return nil } -func sendToPromtail(ctx context.Context, b *batch) error { +func (b *batch) resetBatch() { + b.streams = make(map[string]*logproto.Stream) + b.size = 0 +} + +func (c *promtailClient) sendToPromtail(ctx context.Context, b *batch) error { buf, _, err := b.encode() if err != nil { return err } - backoff := backoff.New(ctx, backoff.Config{minBackoff, maxBackoff, maxRetries}) + backoff := backoff.New(ctx, *c.config.backoff) var status int for { // send uses `timeout` internally, so `context.Background` is good enough. - status, err = send(context.Background(), buf) + status, err = c.send(context.Background(), buf) // Only retry 429s, 500s and connection-level errors. if status > 0 && status != 429 && status/100 != 5 { break } - - fmt.Printf("error sending batch, will retry, status: %d error: %s\n", status, err) + level.Error(*c.log).Log("err", fmt.Errorf("error sending batch, will retry, status: %d error: %s\n", status, err)) backoff.Wait() // Make sure it sends at least once before checking for retry. @@ -160,15 +165,15 @@ func sendToPromtail(ctx context.Context, b *batch) error { } if err != nil { - fmt.Printf("Failed to send logs! %s\n", err) + level.Error(*c.log).Log("err", fmt.Errorf("Failed to send logs! 
%s\n", err)) return err } return nil } -func send(ctx context.Context, buf []byte) (int, error) { - ctx, cancel := context.WithTimeout(ctx, timeout) +func (c *promtailClient) send(ctx context.Context, buf []byte) (int, error) { + ctx, cancel := context.WithTimeout(ctx, c.config.http.timeout) defer cancel() req, err := http.NewRequest("POST", writeAddress.String(), bytes.NewReader(buf)) @@ -190,17 +195,11 @@ func send(ctx context.Context, buf []byte) (int, error) { req.Header.Set("Authorization", "Bearer "+bearerToken) } - promtailClient := &http.Client{} - - if skipTlsVerify == true { - promtailClient = &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}} - } - - resp, err := promtailClient.Do(req.WithContext(ctx)) + resp, err := c.http.Do(req.WithContext(ctx)) if err != nil { return -1, err } - + defer resp.Body.Close() if resp.StatusCode/100 != 2 { scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) line := "" diff --git a/tools/lambda-promtail/lambda-promtail/promtail_client.go b/tools/lambda-promtail/lambda-promtail/promtail_client.go new file mode 100644 index 000000000000..470a760b5233 --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/promtail_client.go @@ -0,0 +1,53 @@ +package main + +import ( + "context" + "crypto/tls" + "net/http" + "net/url" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/backoff" +) + +type Client interface { + sendToPromtail(ctx context.Context, b *batch) error +} + +// Implements Client +type promtailClient struct { + config *promtailClientConfig + http *http.Client + log *log.Logger +} + +type promtailClientConfig struct { + backoff *backoff.Config + http *httpClientConfig + url *url.URL +} + +type httpClientConfig struct { + timeout time.Duration + skipTlsVerify bool +} + +func NewPromtailClient(cfg *promtailClientConfig, log *log.Logger) *promtailClient { + return &promtailClient{ + config: cfg, + http: NewHTTPClient(cfg.http), + log: log, + } +} + +func NewHTTPClient(cfg *httpClientConfig) *http.Client { + transport := http.DefaultTransport + if cfg.skipTlsVerify { + transport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} + } + return &http.Client{ + Timeout: cfg.timeout, + Transport: transport, + } +} diff --git a/tools/lambda-promtail/lambda-promtail/s3.go b/tools/lambda-promtail/lambda-promtail/s3.go index c3acc3c543c2..44ea4cc5346c 100644 --- a/tools/lambda-promtail/lambda-promtail/s3.go +++ b/tools/lambda-promtail/lambda-promtail/s3.go @@ -10,6 +10,8 @@ import ( "time" "github.com/aws/aws-lambda-go/events" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" @@ -38,33 +40,20 @@ const ( LB_LOG_TYPE string = "elasticloadbalancing" ) -func getS3Object(ctx context.Context, labels map[string]string) (io.ReadCloser, error) { +func getS3Client(ctx context.Context, region string) (*s3.Client, error) { var s3Client *s3.Client - if c, ok := s3Clients[labels["bucket_region"]]; ok { + if c, ok := s3Clients[region]; ok { s3Client = c } else { - cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(labels["bucket_region"])) + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) if err != nil { return nil, err } s3Client = s3.NewFromConfig(cfg) - s3Clients[labels["bucket_region"]] = s3Client + s3Clients[region] = s3Client } - - obj, err := s3Client.GetObject(ctx, - &s3.GetObjectInput{ - Bucket: aws.String(labels["bucket"]), - Key: 
aws.String(labels["key"]), - ExpectedBucketOwner: aws.String(labels["bucketOwner"]), - }) - - if err != nil { - fmt.Printf("Failed to get object %s from bucket %s on account %s\n", labels["key"], labels["bucket"], labels["bucketOwner"]) - return nil, err - } - - return obj.Body, nil + return s3Client, nil } func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.ReadCloser) error { @@ -85,9 +74,9 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io. } ls := model.LabelSet{ - model.LabelName("__aws_log_type"): model.LabelValue(logType), - model.LabelName(fmt.Sprintf("__aws_%s_lb", logType)): model.LabelValue(labels["src"]), - model.LabelName(fmt.Sprintf("__aws_%s_lb_owner", logType)): model.LabelValue(labels["account_id"]), + model.LabelName("__aws_log_type"): model.LabelValue(logType), + model.LabelName(fmt.Sprintf("__aws_%s", logType)): model.LabelValue(labels["src"]), + model.LabelName(fmt.Sprintf("__aws_%s_owner", logType)): model.LabelValue(labels["account_id"]), } ls = applyExtraLabels(ls) @@ -142,31 +131,37 @@ func getLabels(record events.S3EventRecord) (map[string]string, error) { return labels, nil } -func processS3Event(ctx context.Context, ev *events.S3Event) error { - batch, err := newBatch(ctx) +func processS3Event(ctx context.Context, ev *events.S3Event, pc Client, log *log.Logger) error { + batch, err := newBatch(ctx, pc) if err != nil { return err } - for _, record := range ev.Records { labels, err := getLabels(record) if err != nil { return err } - - obj, err := getS3Object(ctx, labels) + level.Info(*log).Log("msg", fmt.Sprintf("fetching s3 file: %s", labels["key"])) + s3Client, err := getS3Client(ctx, labels["bucket_region"]) if err != nil { return err } - - err = parseS3Log(ctx, batch, labels, obj) + obj, err := s3Client.GetObject(ctx, + &s3.GetObjectInput{ + Bucket: aws.String(labels["bucket"]), + Key: aws.String(labels["key"]), + ExpectedBucketOwner: aws.String(labels["bucketOwner"]), + }) + if err != nil { + return fmt.Errorf("Failed to get object %s from bucket %s on account %s\n, %s", labels["key"], labels["bucket"], labels["bucketOwner"], err) + } + err = parseS3Log(ctx, batch, labels, obj.Body) if err != nil { return err } - } - err = sendToPromtail(ctx, batch) + err = pc.sendToPromtail(ctx, batch) if err != nil { return err } diff --git a/tools/lambda-promtail/lambda-promtail/s3_test.go b/tools/lambda-promtail/lambda-promtail/s3_test.go index 9a5451033008..c8ffc1f2e62c 100644 --- a/tools/lambda-promtail/lambda-promtail/s3_test.go +++ b/tools/lambda-promtail/lambda-promtail/s3_test.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-lambda-go/events" "github.com/grafana/loki/pkg/logproto" + "github.com/stretchr/testify/require" ) func Test_getLabels(t *testing.T) { @@ -111,9 +112,10 @@ func Test_parseS3Log(t *testing.T) { batchSize int } tests := []struct { - name string - args args - wantErr bool + name string + args args + wantErr bool + expectedStream string }{ { name: "vpcflowlogs", @@ -124,10 +126,13 @@ func Test_parseS3Log(t *testing.T) { streams: map[string]*logproto.Stream{}, }, labels: map[string]string{ - "type": FLOW_LOG_TYPE, + "type": FLOW_LOG_TYPE, + "src": "source", + "account_id": "123456789", }, }, - wantErr: false, + expectedStream: `{__aws_log_type="s3_vpc_flow", __aws_s3_vpc_flow="source", __aws_s3_vpc_flow_owner="123456789"}`, + wantErr: false, }, { name: "albaccesslogs", @@ -138,10 +143,13 @@ func Test_parseS3Log(t *testing.T) { streams: map[string]*logproto.Stream{}, }, labels: 
map[string]string{ - "type": LB_LOG_TYPE, + "type": LB_LOG_TYPE, + "src": "source", + "account_id": "123456789", }, }, - wantErr: false, + expectedStream: `{__aws_log_type="s3_lb", __aws_s3_lb="source", __aws_s3_lb_owner="123456789"}`, + wantErr: false, }, } for _, tt := range tests { @@ -156,6 +164,11 @@ func Test_parseS3Log(t *testing.T) { if err := parseS3Log(context.Background(), tt.args.b, tt.args.labels, tt.args.obj); (err != nil) != tt.wantErr { t.Errorf("parseS3Log() error = %v, wantErr %v", err, tt.wantErr) } + + require.Len(t, tt.args.b.streams, 1) + stream, ok := tt.args.b.streams[tt.expectedStream] + require.True(t, ok, "batch does not contain stream: %s", tt.expectedStream) + require.NotNil(t, stream) }) } } diff --git a/tools/lambda-promtail/lambda-promtail/sqs.go b/tools/lambda-promtail/lambda-promtail/sqs.go new file mode 100644 index 000000000000..2a0d01e72e20 --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/sqs.go @@ -0,0 +1,32 @@ +package main + +import ( + "context" + "encoding/json" + + "github.com/aws/aws-lambda-go/events" +) + +func processSQSEvent(ctx context.Context, evt *events.SQSEvent) error { + for _, record := range evt.Records { + // retrieve nested + event, err := stringToRawEvent(record.Body) + if err != nil { + return err + } + err = handler(ctx, event) + if err != nil { + return err + } + } + return nil +} + +func stringToRawEvent(body string) (map[string]interface{}, error) { + result := make(map[string]interface{}) + err := json.Unmarshal([]byte(body), &result) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/tools/lambda-promtail/lambda-promtail/sqs_test.go b/tools/lambda-promtail/lambda-promtail/sqs_test.go new file mode 100644 index 000000000000..47d7f760b572 --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/sqs_test.go @@ -0,0 +1,68 @@ +package main + +import ( + "testing" + + "github.com/aws/aws-lambda-go/events" +) + +func TestStringToRawEvent(t *testing.T) { + tc := &events.SQSEvent{ + Records: []events.SQSMessage{ + { + AWSRegion: "eu-west-3", + MessageId: "someID", + Body: `{ + "Records": [ + { + "eventVersion": "2.1", + "eventSource": "aws:s3", + "awsRegion": "us-east-1", + "eventTime": "2023-01-18T01:52:46.432Z", + "eventName": "ObjectCreated:Put", + "userIdentity": { + "principalId": "AWS:SOMEID:AWS.INTERNAL.URL" + }, + "requestParameters": { + "sourceIPAddress": "172.15.15.15" + }, + "responseElements": { + "x-amz-request-id": "SOMEID", + "x-amz-id-2": "SOMEID" + }, + "s3": { + "s3SchemaVersion": "1.0", + "configurationId": "tf-s3-queue-SOMEID", + "bucket": { + "name": "some-bucket-name", + "ownerIdentity": { + "principalId": "SOMEID" + + }, + "arn": "arn:aws:s3:::SOME-BUCKET-ARN" + }, + "object": { + "key": "SOME-PREFIX/AWSLogs/ACCOUNT-ID/vpcflowlogs/us-east-1/2023/01/18/end-of-filename.log.gz", + "size": 1042577, + "eTag": "SOMEID", + "versionId": "SOMEID", + "sequencer": "SOMEID" + } + } + } + ] + }`, + }, + }, + } + for _, record := range tc.Records { + event, err := stringToRawEvent(record.Body) + if err != nil { + t.Error(err) + } + _, err = checkEventType(event) + if err != nil { + t.Error(err) + } + } +} diff --git a/tools/lambda-promtail/main.tf b/tools/lambda-promtail/main.tf index 37f53b3d9a4a..8a1eb60742ba 100644 --- a/tools/lambda-promtail/main.tf +++ b/tools/lambda-promtail/main.tf @@ -1,7 +1,3 @@ -provider "aws" { - region = "us-east-2" -} - data "aws_region" "current" {} resource "aws_iam_role" "iam_for_lambda" { @@ -21,56 +17,56 @@ resource "aws_iam_role" "iam_for_lambda" { }) } 
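The new sqs.go above forwards S3 notifications that arrive wrapped in SQS messages: each record body is raw JSON holding the original S3 event, which `stringToRawEvent` decodes back into a generic map before re-dispatching it through the existing handler. A small self-contained sketch of that round trip follows; the sample message body is illustrative only, with made-up bucket and key names.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// stringToRawEvent mirrors the helper added in this PR: the SQS message body
// carries the original S3 notification as JSON, so it is decoded into a
// generic map before being handed back to the existing event handler.
func stringToRawEvent(body string) (map[string]interface{}, error) {
	result := make(map[string]interface{})
	if err := json.Unmarshal([]byte(body), &result); err != nil {
		return nil, err
	}
	return result, nil
}

func main() {
	// A trimmed-down S3 notification as it might appear inside an SQS record
	// body; the bucket and object key are invented for the example.
	body := `{"Records":[{"eventSource":"aws:s3","s3":{"bucket":{"name":"example-bucket"},"object":{"key":"AWSLogs/123456789012/vpcflowlogs/sample.log.gz"}}}]}`

	event, err := stringToRawEvent(body)
	if err != nil {
		panic(err)
	}
	records, ok := event["Records"].([]interface{})
	if !ok {
		panic("unexpected body shape")
	}
	fmt.Println("nested records:", len(records))
}
```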
-resource "aws_iam_role_policy" "logs" { - name = "lambda-logs" - role = aws_iam_role.iam_for_lambda.name - policy = jsonencode({ - "Statement" : [ - { - "Action" : [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - ], - "Effect" : "Allow", - "Resource" : "arn:aws:logs:*:*:*", - }, - { - "Action" : [ - "s3:GetObject", - ], - "Effect" : "Allow", - "Resource" : [ - for bucket in toset(var.bucket_names) : "arn:aws:s3:::${bucket}/*" - ] - }, - { - "Action" : [ - "kms:Decrypt", - ], - "Effect" : "Allow", - "Resource" : "arn:aws:kms:*:*:*", - }, - { - "Action" : [ - "ec2:DescribeNetworkInterfaces", - "ec2:CreateNetworkInterface", - "ec2:DeleteNetworkInterface", - "ec2:DescribeInstances", - "ec2:AttachNetworkInterface" - ], - "Effect" : "Allow", - "Resource" : "*", - }, - { - "Action" : [ - "kinesis:*", - ], - "Effect" : "Allow", - "Resource" : "*" - } +data "aws_iam_policy_document" "logs" { + statement { + actions = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", ] - }) + resources = ["arn:aws:logs:*:*:*"] + } + + dynamic "statement" { + for_each = var.bucket_names + content { + actions = [ + "s3:GetObject", + ] + resources = ["arn:aws:s3:::${statement.value}/*"] + } + } + + statement { + actions = [ + "kms:Decrypt", + ] + resources = ["arn:aws:kms:*:*:*"] + } + + statement { + actions = [ + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeInstances", + "ec2:AttachNetworkInterface", + ] + resources = ["*"] + } + + statement { + actions = [ + "kinesis:*", + ] + resources = ["*"] + } +} + +resource "aws_iam_role_policy" "logs" { + name = "lambda-logs" + role = aws_iam_role.iam_for_lambda.name + policy = data.aws_iam_policy_document.logs.json } data "aws_iam_policy" "lambda_vpc_execution" { @@ -106,16 +102,17 @@ resource "aws_lambda_function" "lambda_promtail" { environment { variables = { - WRITE_ADDRESS = var.write_address - USERNAME = var.username - PASSWORD = var.password - BEARER_TOKEN = var.bearer_token - KEEP_STREAM = var.keep_stream - BATCH_SIZE = var.batch_size - EXTRA_LABELS = var.extra_labels - TENANT_ID = var.tenant_id - SKIP_TLS_VERIFY = var.skip_tls_verify - PRINT_LOG_LINE = var.print_log_line + WRITE_ADDRESS = var.write_address + USERNAME = var.username + PASSWORD = var.password + BEARER_TOKEN = var.bearer_token + KEEP_STREAM = var.keep_stream + BATCH_SIZE = var.batch_size + EXTRA_LABELS = var.extra_labels + OMIT_EXTRA_LABELS_PREFIX = var.omit_extra_labels_prefix ? "true" : "false" + TENANT_ID = var.tenant_id + SKIP_TLS_VERIFY = var.skip_tls_verify + PRINT_LOG_LINE = var.print_log_line } } @@ -141,7 +138,7 @@ resource "aws_lambda_permission" "lambda_promtail_allow_cloudwatch" { # However, if you need to provide an actual filter_pattern for a specific log group you should # copy this block and modify it accordingly. 
resource "aws_cloudwatch_log_subscription_filter" "lambdafunction_logfilter" { - for_each = toset(var.log_group_names) + for_each = var.log_group_names name = "lambdafunction_logfilter_${each.value}" log_group_name = each.value destination_arn = aws_lambda_function.lambda_promtail.arn @@ -151,7 +148,7 @@ resource "aws_cloudwatch_log_subscription_filter" "lambdafunction_logfilter" { } resource "aws_lambda_permission" "allow-s3-invoke-lambda-promtail" { - for_each = toset(var.bucket_names) + for_each = var.bucket_names action = "lambda:InvokeFunction" function_name = aws_lambda_function.lambda_promtail.arn principal = "s3.amazonaws.com" @@ -159,7 +156,7 @@ resource "aws_lambda_permission" "allow-s3-invoke-lambda-promtail" { } resource "aws_kinesis_stream" "kinesis_stream" { - for_each = toset(var.kinesis_stream_name) + for_each = var.kinesis_stream_name name = each.value shard_count = 1 retention_period = 48 @@ -175,7 +172,7 @@ resource "aws_kinesis_stream" "kinesis_stream" { } resource "aws_lambda_event_source_mapping" "kinesis_event_source" { - for_each = toset(var.kinesis_stream_name) + for_each = var.kinesis_stream_name event_source_arn = aws_kinesis_stream.kinesis_stream[each.key].arn function_name = aws_lambda_function.lambda_promtail.arn starting_position = "LATEST" @@ -183,7 +180,7 @@ resource "aws_lambda_event_source_mapping" "kinesis_event_source" { } resource "aws_s3_bucket_notification" "push-to-lambda-promtail" { - for_each = toset(var.bucket_names) + for_each = var.bucket_names bucket = each.value lambda_function { diff --git a/tools/lambda-promtail/template.yaml b/tools/lambda-promtail/template.yaml index 9317443604da..48f5946087cf 100644 --- a/tools/lambda-promtail/template.yaml +++ b/tools/lambda-promtail/template.yaml @@ -39,6 +39,10 @@ Parameters: Description: Comma separated list of extra labels, in the format 'name1,value1,name2,value2,...,nameN,valueN' to add to entries forwarded by lambda-promtail. Type: String Default: "" + OmitExtraLabelsPrefix: + Description: Whether or not to omit the prefix `__extra_` from extra labels defined in `ExtraLabels`. + Type: String + Default: "false" TenantID: Description: Tenant ID to be added when writing logs from lambda-promtail. Type: String @@ -94,6 +98,7 @@ Resources: BEARER_TOKEN: !Ref BearerToken KEEP_STREAM: !Ref KeepStream EXTRA_LABELS: !Ref ExtraLabels + OMIT_EXTRA_LABELS_PREFIX: !Ref OmitExtraLabelsPrefix TENANT_ID: !Ref TenantID SKIP_TLS_VERIFY: !Ref SkipTlsVerify LambdaPromtailVersion: diff --git a/tools/lambda-promtail/variables.tf b/tools/lambda-promtail/variables.tf index 6772de98e2a2..f468ffdb8eb4 100644 --- a/tools/lambda-promtail/variables.tf +++ b/tools/lambda-promtail/variables.tf @@ -5,13 +5,13 @@ variable "write_address" { } variable "bucket_names" { - type = list(string) + type = set(string) description = "List of S3 bucket names to create Event Notifications for." default = [] } variable "log_group_names" { - type = list(string) + type = set(string) description = "List of CloudWatch Log Group names to create Subscription Filters for." default = [] } @@ -66,6 +66,12 @@ variable "extra_labels" { default = "" } +variable "omit_extra_labels_prefix" { + type = bool + description = "Whether or not to omit the prefix `__extra_` from extra labels defined in the variable `extra_labels`." + default = false +} + variable "batch_size" { type = string description = "Determines when to flush the batch of logs (bytes)." 
@@ -86,7 +92,7 @@ variable "lambda_vpc_security_groups" { variable "kms_key_arn" { type = string - description = "kms key arn for encryp env vars." + description = "kms key arn for encrypting env vars." default = "" } @@ -97,7 +103,7 @@ variable "skip_tls_verify" { } variable "kinesis_stream_name" { - type = list(string) + type = set(string) description = "Enter kinesis name if kinesis stream is configured as event source in lambda." default = [] } diff --git a/tools/lambda-promtail/versions.tf b/tools/lambda-promtail/versions.tf new file mode 100644 index 000000000000..c77dc04fa988 --- /dev/null +++ b/tools/lambda-promtail/versions.tf @@ -0,0 +1,8 @@ +terraform { + required_version = ">= 0.15" + required_providers { + aws = { + source = "hashicorp/aws" + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 000000000000..80321d29a9ba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,503 @@ +# Release History + +## 1.2.0 (2022-11-04) + +### Features Added +* Added `ClientOptions.APIVersion` field, which overrides the default version a client + requests of the service, if the client supports this (all ARM clients do). +* Added package `tracing` that contains the building blocks for distributed tracing. +* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. + +### Bugs Fixed +* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. +* Fixed the MaxRetryDelay default to be 60s. +* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. +* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. + +### Other Changes +* Retain contents of read-only fields when sending requests. + +## 1.1.4 (2022-10-06) + +### Bugs Fixed +* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. +* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string + +### Other Changes +* Removed logging URL from retry policy as it's redundant. +* Retry policy logs when it exits due to a non-retriable status code. + +## 1.1.3 (2022-09-01) + +### Bugs Fixed +* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. + +## 1.1.2 (2022-08-09) + +### Other Changes +* Fixed various doc bugs. + +## 1.1.1 (2022-06-30) + +### Bugs Fixed +* Avoid polling when a RELO LRO synchronously terminates. + +## 1.1.0 (2022-06-03) + +### Other Changes +* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. 
+ +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. 
+ +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. + +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. 
+* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. + +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. 
+ + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. +* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. 
+* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. +* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the detaion in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. 
+* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. + +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 000000000000..48ea6616b5b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 000000000000..35a74e18d09a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). 
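As a quick illustration of the consumption pattern the README describes (azcore is pulled in indirectly and configured through the options structs that client modules accept), here is a minimal sketch. It is not part of the vendored module; the field values are arbitrary and the concrete client that would receive these options is assumed, not shown.

```go
package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	// azcore.ClientOptions is an alias for policy.ClientOptions; client modules
	// embed it in their own options structs, so these settings apply to any client.
	opts := azcore.ClientOptions{
		Retry: policy.RetryOptions{
			MaxRetries:    5,                // retries after the initial attempt (illustrative value)
			MaxRetryDelay: 30 * time.Second, // cap on the delay between retries (illustrative value)
		},
	}
	_ = opts // would be passed to a concrete client constructor's options struct
}
```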
+ +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 000000000000..aab9218538da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 000000000000..9d077a3e1260 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. 
+ ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. + Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 000000000000..985b1bde2f2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 000000000000..f9fb23422dfd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "context" + "reflect" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error) +} + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
+func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 000000000000..28c64678c769 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +# Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + + // Do implements the Policy interface on policyFunc. 
+ func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +# Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +# Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +# Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. 
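By way of illustration, the seekable-body requirement described above can be met by wrapping a bytes.Reader with streaming.NopCloser before calling SetBody. This sketch is not from the vendored module; the helper name, endpoint URL, and payload are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

// buildPut creates a request whose body can be rewound by the retry policy.
func buildPut(ctx context.Context) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, "https://service.example.com/widgets/1")
	if err != nil {
		return nil, err
	}
	payload := []byte(`{"name":"example"}`)
	// streaming.NopCloser wraps the seekable bytes.Reader so SetBody receives an io.ReadSeekCloser.
	if err := req.SetBody(streaming.NopCloser(bytes.NewReader(payload)), "application/json"); err != nil {
		return nil, err
	}
	return req, nil
}
```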
+ +# Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +# Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +# Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +# Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +# Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + - Succeeded - the LRO completed successfully + - Failed - the LRO failed to complete + - Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. 
+ + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +# Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 000000000000..17bd50c67320 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 000000000000..23ea7e7c8eac --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. 
Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 000000000000..6e029d493ce4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,60 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload(). +func Payload(resp *http.Response) ([]byte, error) { + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok { + return buf.Bytes(), nil + } + bytesBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + resp.Body = shared.NewNopClosingBytesReader(bytesBody) + return bytesBody, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 000000000000..c44efd6eff57 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/http/httpguts" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. 
+ Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. +func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 000000000000..4aeec158937b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. 
+type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. +func (req *Request) SetOperationValue(value interface{}) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value interface{}) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + // Set the body and content length. + size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + if size == 0 { + body.Close() + return nil + } + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + // keep a copy of the original body. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + req.req.Header.Set(shared.HeaderContentType, contentType) + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. 
+func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 000000000000..3db6acc83258 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,142 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := Payload(resp) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. 
+ ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. +func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := Payload(e.RawResponse) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 000000000000..0684cb317390 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 000000000000..d34f161c7b08 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,158 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
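Taken together, the request and response-error pieces above are consumed through their exported counterparts in azcore and azcore/runtime. The sketch below is a minimal illustration of that flow, assuming a placeholder module name, version, and endpoint; it builds a request, sends it through a pipeline with only the default policies, and unwraps the resulting *azcore.ResponseError.

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	// a pipeline with only the built-in policies (telemetry, retry, logging, body download, ...)
	pl := runtime.NewPipeline("example-module", "v0.1.0", runtime.PipelineOptions{}, nil)

	req, err := runtime.NewRequest(context.Background(), http.MethodGet, "https://example.contoso.com/items")
	if err != nil {
		panic(err)
	}
	resp, err := pl.Do(req)
	if err != nil {
		panic(err)
	}
	if resp.StatusCode >= 400 {
		// wrap non-success responses the same way generated clients do
		err = runtime.NewResponseError(resp)
	}
	// ResponseError carries the status code and the service's x-ms-error-code, if any
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println(respErr.StatusCode, respErr.ErrorCode)
	}
}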
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !pollers.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: state, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := pollers.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if pollers.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 000000000000..7efdd8a0df1c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,134 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. +func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. 
+ Type string `json:"type"` + + // The URL for polling. + PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := pollers.StatusInProgress + provState, err := pollers.GetProvisioningState(resp) + if err != nil && !errors.Is(err, pollers.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = pollers.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = pollers.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = pollers.StatusSucceeded + return p.CurState, nil + } + state, err := pollers.GetProvisioningState(resp) + if errors.Is(err, pollers.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = pollers.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 000000000000..276685da443e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,118 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. 
+func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + // check for provisioning state. if the operation is a RELO + // and terminates synchronously this will prevent extra polling. + // it's ok if there's no provisioning state. + state, _ := pollers.GetProvisioningState(resp) + if state == "" { + state = pollers.StatusInProgress + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: state, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. + provState, _ := pollers.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = pollers.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = pollers.StatusSucceeded + } else { + p.CurState = pollers.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 000000000000..c3c648266a28 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,144 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
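Each of the poller implementations in these packages (async, body, loc, op) exposes the same Done/Poll/Result trio, and the exported runtime poller presumably drives them with a loop along these lines. The lro interface and pollUntilDone helper below are illustrative names only; a production loop would also honor the service's Retry-After header rather than a fixed interval.

package pollsketch

import (
	"context"
	"net/http"
	"time"
)

// lro is a hypothetical interface capturing the method set shared by the
// async, body, loc, and op pollers.
type lro[T any] interface {
	Done() bool
	Poll(ctx context.Context) (*http.Response, error)
	Result(ctx context.Context, out *T) error
}

// pollUntilDone polls at a fixed interval until the operation reaches a
// terminal state, then unmarshals the final payload into T.
func pollUntilDone[T any](ctx context.Context, p lro[T], interval time.Duration) (T, error) {
	var result T
	for !p.Done() {
		if _, err := p.Poll(ctx); err != nil {
			return result, err
		}
		if p.Done() {
			break
		}
		select {
		case <-time.After(interval):
		case <-ctx.Done():
			return result, ctx.Err()
		}
	}
	if err := p.Result(ctx, &result); err != nil {
		return result, err
	}
	return result, nil
}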
+ +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Applicable returns true if the LRO is using Operation-Location. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !pollers.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. 
+ curState := pollers.StatusInProgress + status, err := pollers.GetStatus(resp) + if err != nil && !errors.Is(err, pollers.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + if !pollers.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + state, err := pollers.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 000000000000..37ed647f4e0d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. 
+ FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 000000000000..17ab7dadc3fa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,317 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// getTokenTypeName creates a type name from the type parameter T. +func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. 
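The resume-token helpers above compose as in the short sketch below, written as if it lived inside this pollers package. The widget type and the literal token payload are made up for illustration.

// widget is a stand-in for an operation's result type.
type widget struct {
	ID string `json:"id"`
}

func resumeTokenRoundTrip() error {
	// a token pairs the poller's serialized state with the name of the result type
	tok, err := NewResumeToken[widget](map[string]interface{}{"asyncURL": "https://example.contoso.com/operations/1"})
	if err != nil {
		return err
	}
	// when rehydrating, first confirm the token was minted for the expected result type
	if err := IsTokenValid[widget](tok); err != nil {
		return err
	}
	// then pull out the poller-specific portion for the concrete poller to unmarshal
	raw, err := ExtractToken(tok)
	if err != nil {
		return err
	}
	_ = raw // e.g. json.Unmarshal(raw, &somePoller)
	return nil
}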
+func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]interface{}, error) { + body, err := exported.Payload(resp) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]interface{} + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. +func provisioningState(jsonBody map[string]interface{}) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]interface{}) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]interface{}) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. +func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. 
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := exported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !StatusCodeValid(resp) || failed { + // the LRO failed. unmarshall the error and update state + return exported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp) + if err != nil { + return err + } + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 000000000000..75d241c5b427 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderUserAgent = "User-Agent" +) + +const BearerTokenPrefix = "Bearer " + +const ( + // Module is the name of the calling module used in telemetry data. + Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. 
+ Version = "v1.2.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 000000000000..96eef2956fff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "errors" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. +func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// BytesSetter abstracts replacing a byte slice on some type. +type BytesSetter interface { + Set(b []byte) +} + +// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice. +func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader { + return &NopClosingBytesReader{s: data} +} + +// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type NopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *NopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*NopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *NopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. 
+func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} + +var _ BytesSetter = (*NopClosingBytesReader)(nil) + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 000000000000..2f3901bff3c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 000000000000..7bde29d0a462 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. +type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. 
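A small sketch of how an application might consume this exported log package; the event filter and the listener body are arbitrary choices.

package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// restrict output to retry and long-running-operation events
	azlog.SetEvents(azlog.EventRetryPolicy, azlog.EventLRO)
	// route SDK log messages to a custom sink; without a listener the SDK only
	// logs to stderr when AZURE_SDK_GO_LOGGING is set to "all"
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
}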
+func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 000000000000..fad2579ed6c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 000000000000..27c30229883e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion string + + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. + Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // TracingProvider configures the tracing provider. + // It defaults to a no-op tracer. + TracingProvider tracing.Provider + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. 
+ IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The value is used only if the HTTP response does not contain a Retry-After header. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 120 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // A nil slice will use the following values. + // http.StatusRequestTimeout 408 + // http.StatusTooManyRequests 429 + // http.StatusInternalServerError 500 + // http.StatusBadGateway 502 + // http.StatusServiceUnavailable 503 + // http.StatusGatewayTimeout 504 + // Specifying values will replace the default values. + // Specifying an empty slice will disable retries for HTTP status codes. + StatusCodes []int +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// BearerTokenOptions configures the bearer token policy's behavior. 
+type BearerTokenOptions struct { + // placeholder for future options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 000000000000..c9cfa438cb34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 000000000000..6d03b291ebff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 000000000000..5507665d651d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. + Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. 
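For callers, the More/NextPage pair typically ends up in a loop like the sketch below; listAll and the visit callback are illustrative names, not part of the package.

package pagersketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// listAll drains a pager, handing each page to visit as it arrives.
func listAll[T any](ctx context.Context, pager *runtime.Pager[T], visit func(T) error) error {
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		if err := visit(page); err != nil {
			return err
		}
	}
	return nil
}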
+func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 000000000000..a2906f51bca9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + APIVersion APIVersionOptions + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{policyFunc(includeResponsePolicy)} + if cp.APIVersion != "" { + policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) + } + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) 
+ policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} + +// policyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type policyFunc func(*policy.Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go new file mode 100644 index 000000000000..e5309aa6c15b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// APIVersionOptions contains options for API versions +type APIVersionOptions struct { + // Location indicates where to set the version on a request, for example in a header or query param + Location APIVersionLocation + // Name is the name of the header or query parameter, for example "api-version" + Name string +} + +// APIVersionLocation indicates which part of a request identifies the service version +type APIVersionLocation int + +const ( + // APIVersionLocationQueryParam indicates a query parameter + APIVersionLocationQueryParam = 0 + // APIVersionLocationHeader indicates a header + APIVersionLocationHeader = 1 +) + +// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version +// isn't empty and opts.Name is empty, Do will return an error. +func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { + if opts == nil { + opts = &APIVersionOptions{} + } + return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} +} + +// apiVersionPolicy enables users to set the API version of every request a client sends. +type apiVersionPolicy struct { + // location indicates whether "name" refers to a query parameter or header. + location APIVersionLocation + + // name of the query param or header whose value should be overridden; provided by the client. + name string + + // version is the value (provided by the user) that replaces the default version value. + version string +} + +// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. 
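Putting the pipeline and API-version pieces together, a client constructor might wire them up roughly as below. The client type, module name, and version strings are placeholders, and the example assumes the service identifies its version through an "api-version" query parameter.

package clientsketch

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

type Client struct {
	pl runtime.Pipeline
}

func NewClient(options *policy.ClientOptions) *Client {
	if options == nil {
		options = &policy.ClientOptions{}
	}
	plOpts := runtime.PipelineOptions{
		// tell the pipeline how to apply a user-supplied ClientOptions.APIVersion:
		// as an "api-version" query parameter on every request
		APIVersion: runtime.APIVersionOptions{
			Name:     "api-version",
			Location: runtime.APIVersionLocationQueryParam,
		},
	}
	return &Client{pl: runtime.NewPipeline("exampleclient", "v0.1.0", plOpts, options)}
}

A caller that then sets ClientOptions.APIVersion to a specific service version causes the Do method below to rewrite the query string on every request.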
+func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { + if a.version != "" { + if a.name == "" { + // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions + return nil, errors.New("this client doesn't support overriding its API version") + } + switch a.location { + case APIVersionLocationHeader: + req.Raw().Header.Set(a.name, a.version) + case APIVersionLocationQueryParam: + q := req.Raw().URL.Query() + q.Set(a.name, a.version) + req.Raw().URL.RawQuery = q.Encode() + default: + return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 000000000000..71e3062be0bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + // the following fields are read-only + cred azcore.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes}) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. 
+func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + return &BearerTokenPolicy{ + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: b, + req: req, + } + tk, err := b.mainResource.Get(as) + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 000000000000..02d621ee89e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. +func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = exported.Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 000000000000..770e0a2b6a64 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 000000000000..4714baa30cd6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 000000000000..30a02a7a41b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,251 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "bytes" + "fmt" + "io" + "net/http" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range o.AllowedQueryParams { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // redact applicable query params + cpURL := *req.Raw().URL + qp := cpURL.Query() + for k := range qp { + if _, ok := p.allowedQP[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + cpURL.RawQuery = qp.Encode() + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. 
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := io.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 000000000000..db70955b28bc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + const requestIdHeader = "x-ms-client-request-id" + if req.Raw().Header.Get(requestIdHeader) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(requestIdHeader, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 000000000000..b33002018724 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,251 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + + // SDK guidelines specify the default MaxRetryDelay is 60 seconds + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 60 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 800 * time.Millisecond + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + // NOTE: if you change this list, you MUST update the docs in policy/policy.go + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. 
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + resp, err = req.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") + return + } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } else if delay > options.MaxRetryDelay { + // the retry-after delay exceeds the the cap so don't retry + log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
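//
// Illustrative sketch (not from the upstream azcore source): overriding the
// retry behavior for a single call. The values are placeholders; fields left
// at zero fall back to the defaults applied by setDefaults above.
//
//    ctx := runtime.WithRetryOptions(context.Background(), policy.RetryOptions{
//        MaxRetries: 2,
//        RetryDelay: 500 * time.Millisecond,
//        TryTimeout: 10 * time.Second,
//    })
//    // pass ctx to one client method call; only that call uses these options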
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. + return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 000000000000..2abcdc576b69 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. 
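//
// Illustrative sketch (not from the upstream azcore source): judging from
// formatTelemetry and platformInfo below, the resulting User-Agent fragment
// has the shape "<ApplicationID> azsdk-go-<module>/<version> (<go version>; <OS>)".
// The module name and application ID here are placeholders.
//
//    tp := runtime.NewTelemetryPolicy("azblob", "v1.0.0", &policy.TelemetryOptions{
//        ApplicationID: "myapp/1.0",
//    })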
+func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 000000000000..14c90fecfe5d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -0,0 +1,326 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. 
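//
// Illustrative sketch (not from the upstream azcore source): how a generated
// client method typically wraps the initial LRO response, assuming a
// placeholder WidgetResponse result type and an existing runtime.Pipeline pl.
//
//    resp, err := pl.Do(req)
//    if err != nil {
//        return nil, err
//    }
//    return runtime.NewPoller[WidgetResponse](resp, pl, nil)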
+type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !pollers.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. + // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. 
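//
// Illustrative sketch (not from the upstream azcore source): resume tokens
// pair with Poller.ResumeToken. A round trip, reusing the placeholder
// WidgetResponse type and pipeline pl from the sketch above:
//
//    tk, err := poller.ResumeToken() // before the LRO reaches a terminal state
//    // persist tk, possibly hand it to another process, then later:
//    resumed, err := runtime.NewPollerFromResumeToken[WidgetResponse](tk, pl, nil)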
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. +// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. 
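//
// Illustrative sketch (not from the upstream azcore source): the common
// caller-side pattern, assuming a poller returned by a client's Begin* method
// and accepting the default 30-second frequency.
//
//    result, err := poller.PollUntilDone(ctx, nil)
//    if err != nil {
//        // the LRO failed, was cancelled, or ctx expired
//    }
//    // result holds the terminal payload of type T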
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + // skip the floor check when executing tests so they don't take so long + if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 000000000000..98e00718488e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,248 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "os" + "path" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +// The endpoint MUST be properly encoded before calling this function. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + p := path.Join(paths...) + // path.Join will remove any trailing slashes. + // if one was provided, preserve it. + if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + if qps != "" { + p = p + "?" + qps + } + + if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { + root = root[:len(root)-1] + } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { + p = "/" + p + } + return root + p +} + +// EncodeByteArray will base-64 encode the byte slice v. 
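//
// Illustrative sketch (not from the upstream azcore source): a small round
// trip pairing this helper with DecodeByteArray from response.go.
//
//    s := runtime.EncodeByteArray([]byte("hello"), runtime.Base64URLFormat)
//    var out []byte
//    err := runtime.DecodeByteArray(s, &out, runtime.Base64URLFormat) // out == []byte("hello")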
+func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. +func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { + v = cloneWithoutReadOnlyFields(v) + } + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + + writeContent := func(fieldname, filename string, src io.Reader) error { + fd, err := writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, src); err != nil { + return err + } + return nil + } + + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + if err := writeContent(k, k, rsc); err != nil { + return err + } + continue + } else if rscs, ok := v.([]io.ReadSeekCloser); ok { + for _, rsc := range rscs { + if err := writeContent(k, k, rsc); err != nil { + return err + } + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. 
+ if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 000000000000..f86ec0b95ea1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,136 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. 
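//
// Illustrative sketch (not from the upstream azcore source): the usual
// caller-side decoding flow with the JSON helper below, assuming the standard
// net/http import, a hypothetical widget struct, and a pipeline pl.
//
//    resp, err := pl.Do(req)
//    if err != nil {
//        return err
//    }
//    if runtime.HasStatusCode(resp, http.StatusOK) {
//        var w widget
//        err = runtime.UnmarshalAsJSON(resp, &w)
//    }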
+func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // UTF8 + trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf")) + if len(trimmed) < len(payload) { + resp.Body.(shared.BytesSetter).Set(trimmed) + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 000000000000..869bed511842 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 000000000000..cadaef3d5842 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 000000000000..8563375af07e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. +func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. +func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. 
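//
// Illustrative sketch (not from the upstream azcore source): wrapping an
// upload body so each Read reports the running byte count. The payload and
// callback are placeholders.
//
//    body := streaming.NopCloser(bytes.NewReader(payload))
//    wrapped := streaming.NewRequestProgress(body, func(n int64) {
//        fmt.Printf("uploaded %d bytes\n", n)
//    })
//    // pass wrapped wherever an io.ReadSeekCloser request body is expected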
+func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 000000000000..faa98c9dc514 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 000000000000..e0e4817b90d1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go new file mode 100644 index 000000000000..80282d4ab0a6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package tracing + +// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. +type SpanKind int + +const ( + // SpanKindInternal indicates the span represents an internal operation within an application. + SpanKindInternal SpanKind = 1 + + // SpanKindServer indicates the span covers server-side handling of a request. + SpanKindServer SpanKind = 2 + + // SpanKindClient indicates the span describes a request to a remote service. + SpanKindClient SpanKind = 3 + + // SpanKindProducer indicates the span was created by a messaging producer. + SpanKindProducer SpanKind = 4 + + // SpanKindConsumer indicates the span was created by a messaging consumer. + SpanKindConsumer SpanKind = 5 +) + +// SpanStatus represents the status of a span. +type SpanStatus int + +const ( + // SpanStatusUnset is the default status code. + SpanStatusUnset SpanStatus = 0 + + // SpanStatusError indicates the operation contains an error. + SpanStatusError SpanStatus = 1 + + // SpanStatusOK indicates the operation completed successfully. + SpanStatusOK SpanStatus = 2 +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go new file mode 100644 index 000000000000..75f757cedd3b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package tracing contains the definitions needed to support distributed tracing. 
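//
// Illustrative sketch (not from the upstream azcore source): this package is
// implementation-agnostic, so an application supplies the concrete behavior.
// A minimal no-op wiring using only the constructors defined in this file:
//
//    provider := tracing.NewProvider(func(name, version string) tracing.Tracer {
//        return tracing.NewTracer(func(ctx context.Context, spanName string, opts *tracing.SpanOptions) (context.Context, tracing.Span) {
//            return ctx, tracing.Span{}
//        }, nil)
//    }, nil)
//    tracer := provider.NewTracer("module", "v1.0.0")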
+package tracing + +import ( + "context" +) + +// ProviderOptions contains the optional values when creating a Provider. +type ProviderOptions struct { + // for future expansion +} + +// NewProvider creates a new Provider with the specified values. +// - newTracerFn is the underlying implementation for creating Tracer instances +// - options contains optional values; pass nil to accept the default value +func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { + return Provider{ + newTracerFn: newTracerFn, + } +} + +// Provider is the factory that creates Tracer instances. +// It defaults to a no-op provider. +type Provider struct { + newTracerFn func(name, version string) Tracer +} + +// NewTracer creates a new Tracer for the specified name and version. +// - name - the name of the tracer object, typically the fully qualified name of the service client +// - version - the version of the module in which the service client resides +func (p Provider) NewTracer(name, version string) (tracer Tracer) { + if p.newTracerFn != nil { + tracer = p.newTracerFn(name, version) + } + return +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// TracerOptions contains the optional values when creating a Tracer. +type TracerOptions struct { + // for future expansion +} + +// NewTracer creates a Tracer with the specified values. +// - newSpanFn is the underlying implementation for creating Span instances +// - options contains optional values; pass nil to accept the default value +func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + return Tracer{ + newSpanFn: newSpanFn, + } +} + +// Tracer is the factory that creates Span instances. +type Tracer struct { + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) +} + +// Start creates a new span and a context.Context that contains it. +// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span +// - spanName identifies the span within a trace, it's typically the fully qualified API name +// - options contains optional values for the span, pass nil to accept any defaults +func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { + if t.newSpanFn != nil { + return t.newSpanFn(ctx, spanName, options) + } + return ctx, Span{} +} + +// SpanOptions contains optional settings for creating a span. +type SpanOptions struct { + // Kind indicates the kind of Span. + Kind SpanKind + + // Attributes contains key-value pairs of attributes for the span. + Attributes []Attribute +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// SpanImpl abstracts the underlying implementation for Span, +// allowing it to work with various tracing implementations. +// Any zero-values will have their default, no-op behavior. +type SpanImpl struct { + // End contains the implementation for the Span.End method. + End func() + + // SetAttributes contains the implementation for the Span.SetAttributes method. + SetAttributes func(...Attribute) + + // AddEvent contains the implementation for the Span.AddEvent method. + AddEvent func(string, ...Attribute) + + // AddError contains the implementation for the Span.AddError method. 
+ AddError func(err error) + + // SetStatus contains the implementation for the Span.SetStatus method. + SetStatus func(SpanStatus, string) +} + +// NewSpan creates a Span with the specified implementation. +func NewSpan(impl SpanImpl) Span { + return Span{ + impl: impl, + } +} + +// Span is a single unit of a trace. A trace can contain multiple spans. +// A zero-value Span provides a no-op implementation. +type Span struct { + impl SpanImpl +} + +// End terminates the span and MUST be called before the span leaves scope. +// Any further updates to the span will be ignored after End is called. +func (s Span) End() { + if s.impl.End != nil { + s.impl.End() + } +} + +// SetAttributes sets the specified attributes on the Span. +// Any existing attributes with the same keys will have their values overwritten. +func (s Span) SetAttributes(attrs ...Attribute) { + if s.impl.SetAttributes != nil { + s.impl.SetAttributes(attrs...) + } +} + +// AddEvent adds a named event with an optional set of attributes to the span. +func (s Span) AddEvent(name string, attrs ...Attribute) { + if s.impl.AddEvent != nil { + s.impl.AddEvent(name, attrs...) + } +} + +// AddError adds the specified error event to the span. +func (s Span) AddError(err error) { + if s.impl.AddError != nil { + s.impl.AddError(err) + } +} + +// SetStatus sets the status on the span along with a description. +func (s Span) SetStatus(code SpanStatus, desc string) { + if s.impl.SetStatus != nil { + s.impl.SetStatus(code, desc) + } +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Attribute is a key-value pair. +type Attribute struct { + // Key is the name of the attribute. + Key string + + // Value is the attribute's value. + // Types that are natively supported include int64, float64, int, bool, string. + // Any other type will be formatted per rules of fmt.Sprintf("%v"). + Value any +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md new file mode 100644 index 000000000000..5877e476f6fa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -0,0 +1,325 @@ +# Release History + +## 1.2.0 (2022-11-08) + +### Other Changes +* This version includes all fixes and features from 1.2.0-beta.* + +## 1.2.0-beta.3 (2022-10-11) + +### Features Added +* `ManagedIdentityCredential` caches tokens in memory + +### Bugs Fixed +* `ClientCertificateCredential` sends only the leaf cert for SNI authentication + +## 1.2.0-beta.2 (2022-08-10) + +### Features Added +* Added `ClientAssertionCredential` to enable applications to authenticate + with custom client assertions + +### Other Changes +* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors +* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0 + +## 1.2.0-beta.1 (2022-06-07) + +### Features Added +* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD` + ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099)) + +## 1.1.0 (2022-06-07) + +### Features Added +* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party + applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a + region name. 
+ ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605)) + +## 1.0.1 (2022-06-07) + +### Other Changes +* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1 + ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176)) + +## 1.0.0 (2022-05-12) + +### Features Added +* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the + client ID of a user-assigned managed identity + ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293)) + +### Breaking Changes +* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead + to authenticate a user with the authorization code flow. +* Instances of `AuthenticationFailedError` are now returned by pointer. +* `GetToken()` returns `azcore.AccessToken` by value + +### Bugs Fixed +* `AzureCLICredential` panics after receiving an unexpected error type + ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490)) + +### Other Changes +* `GetToken()` returns an error when the caller specifies no scope +* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal` + +## 0.14.0 (2022-04-05) + +### Breaking Changes +* This module now requires Go 1.18 +* Removed `AuthorityHost`. Credentials are now configured for sovereign or private + clouds with the API in `azcore/cloud`, for example: + ```go + // before + opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment} + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + + // after + import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + + opts := azidentity.ClientSecretCredentialOptions{} + opts.Cloud = cloud.AzureGovernment + cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts) + ``` + +## 0.13.2 (2022-03-08) + +### Bugs Fixed +* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential` + ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144)) + +### Other Changes +* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01 + ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086)) + +## 0.13.1 (2022-02-08) + +### Features Added +* `EnvironmentCredential` supports certificate SNI authentication when + `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true". + ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851)) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken()` now returns an error when configured for + a user assigned identity in Azure Cloud Shell (which doesn't support such identities) + ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946)) + +### Other Changes +* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the + error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token + from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923)) + +## 0.13.0 (2022-01-11) + +### Breaking Changes +* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name +* Unexported `CredentialUnavailableError` +* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`. + * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called. 
+ * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential. + * `DefaultAzureCredenial` will also re-use the first successful credential on subsequent calls to `GetToken`. + * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential. + +### Other Changes +* `ManagedIdentityCredential` no longer probes IMDS before requesting a token + from it. Also, an error response from IMDS no longer disables a credential + instance. Following an error, a credential instance will continue to send + requests to IMDS as necessary. +* Adopted MSAL for user and service principal authentication +* Updated `azcore` requirement to 0.21.0 + +## 0.12.0 (2021-11-02) +### Breaking Changes +* Raised minimum go version to 1.16 +* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's + `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy. +* The `AuthorityHost` field in credential options structs is now a custom type, + `AuthorityHost`, with underlying type `string` +* `NewChainedTokenCredential` has a new signature to accommodate a placeholder + options struct: + ```go + // before + cred, err := NewChainedTokenCredential(credA, credB) + + // after + cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil) + ``` +* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential` + from `DefaultAzureCredentialOptions` +* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of + a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases: + ```go + // before + cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil) + + // after + certData, err := os.ReadFile("/cert.pem") + certs, key, err := ParseCertificates(certData, password) + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil) + ``` +* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port` +* Removed `AADAuthenticationFailedError` +* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now + specified by `ManagedIdentityCredentialOptions.ID`: + ```go + // before + cred, err := NewManagedIdentityCredential("client-id", nil) + // or, for a resource ID + opts := &ManagedIdentityCredentialOptions{ID: ResourceID} + cred, err := NewManagedIdentityCredential("/subscriptions/...", opts) + + // after + clientID := ClientID("7cf7db0d-...") + opts := &ManagedIdentityCredentialOptions{ID: clientID} + // or, for a resource ID + resID: ResourceID("/subscriptions/...") + opts := &ManagedIdentityCredentialOptions{ID: resID} + cred, err := NewManagedIdentityCredential(opts) + ``` +* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error` +* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization + syntax, this change renames `HTTPClient` fields to `Transport`. +* Renamed `LogCredential` to `EventCredential` +* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH` +* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and + `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead. +* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. 
In their place are two + interfaces having the same names. + +### Bugs Fixed +* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes` + +### Features Added +* Added connection configuration options to `DefaultAzureCredentialOptions` +* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error, + if available + +### Other Changes +* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential` +* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions` + +## 0.11.0 (2021-09-08) +### Breaking Changes +* Unexported `AzureCLICredentialOptions.TokenProvider` and its type, + `AzureCLITokenProvider` + +### Bug Fixes +* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError` + when IMDS has no assigned identity, signaling `DefaultAzureCredential` to + try other credentials + + +## 0.10.0 (2021-08-30) +### Breaking Changes +* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383) + +## 0.9.3 (2021-08-20) + +### Bugs Fixed +* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes` + +### Other Changes +* Bumps version of `azcore` to `v0.18.1` + + +## 0.9.2 (2021-07-23) +### Features Added +* Adding support for Service Fabric environment in `ManagedIdentityCredential` +* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential` + + +## 0.9.1 (2021-05-24) +### Features Added +* Add LICENSE.txt and bump version information + + +## 0.9.0 (2021-05-21) +### Features Added +* Add support for authenticating in Azure Stack environments +* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential` +* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential` + + +## 0.8.0 (2021-01-20) +### Features Added +* Updating documentation + + +## 0.7.1 (2021-01-04) +### Features Added +* Adding port option to `InteractiveBrowserCredential` + + +## 0.7.0 (2020-12-11) +### Features Added +* Add `redirectURI` parameter back to authentication code flow + + +## 0.6.1 (2020-12-09) +### Features Added +* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens. + + +## 0.6.0 (2020-11-16) +### Features Added +* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL. + + +## 0.5.0 (2020-10-30) +### Features Added +* Flattening credential options + + +## 0.4.3 (2020-10-21) +### Features Added +* Adding Azure Arc support in `ManagedIdentityCredential` + + +## 0.4.2 (2020-10-16) +### Features Added +* Typo fixes + + +## 0.4.1 (2020-10-16) +### Features Added +* Ensure authority hosts are only HTTPs + + +## 0.4.0 (2020-10-16) +### Features Added +* Adding options structs for credentials + + +## 0.3.0 (2020-10-09) +### Features Added +* Update `DeviceCodeCredential` callback + + +## 0.2.2 (2020-10-09) +### Features Added +* Add `AuthorizationCodeCredential` + + +## 0.2.1 (2020-10-06) +### Features Added +* Add `InteractiveBrowserCredential` + + +## 0.2.0 (2020-09-11) +### Features Added +* Refactor `azidentity` on top of `azcore` refactor +* Updated policies to conform to `policy.Policy` interface changes. +* Updated non-retriable errors to conform to `azcore.NonRetriableError`. +* Fixed calls to `Request.SetBody()` to include content type. +* Switched endpoints to string types and removed extra parsing code. 
+ + +## 0.1.1 (2020-09-02) +### Features Added +* Add `AzureCLICredential` to `DefaultAzureCredential` chain + + +## 0.1.0 (2020-07-23) +### Features Added +* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt new file mode 100644 index 000000000000..48ea6616b5b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md new file mode 100644 index 000000000000..4ac53eb7b276 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -0,0 +1,307 @@ +# Migrating from autorest/adal to azidentity + +`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. + +This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. + +## Table of contents + +- [Acquire a token](#acquire-a-token) +- [Client certificate authentication](#client-certificate-authentication) +- [Client secret authentication](#client-secret-authentication) +- [Configuration](#configuration) +- [Device code authentication](#device-code-authentication) +- [Managed identity](#managed-identity) +- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages) + +## Configuration + +### `autorest/adal` + +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +``` + +### `azidentity` + +A credential instance can acquire tokens for any audience. 
The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina} + +cred, err := azidentity.NewClientSecretCredential( + tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts}, +) +handle(err) +``` + +## Client secret authentication + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Client certificate authentication + +### `autorest/adal` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +handle(err) + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/", +) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +certData, err := os.ReadFile("./example.pfx") +handle(err) + +certs, key, err := azidentity.ParseCertificates(certData, nil) +handle(err) + +cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +## Managed identity + +### `autorest/adal` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := 
azidentity.NewManagedIdentityCredential(nil) +handle(err) + +client, err := armsubscriptions.NewClient(cred, nil) +handle(err) +``` + +### User-assigned identities + +`autorest/adal`: + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +opts := &adal.ManagedIdentityOptions{ClientID: "..."} +spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/") +handle(err) +``` + +`azidentity`: + +```go +import "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + +opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")} +cred, err := azidentity.NewManagedIdentityCredential(&opts) +handle(err) +``` + +## Device code authentication + +### `autorest/adal` + +```go +import ( + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +oauthClient := &http.Client{} +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) +resource := "https://management.azure.com/" +deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource) +handle(err) + +// display instructions, wait for the user to authenticate +fmt.Println(*deviceCode.Message) +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +handle(err) + +spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = autorest.NewBearerAuthorizer(spt) +``` + +### `azidentity` + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions" +) + +cred, err := azidentity.NewDeviceCodeCredential(nil) +handle(err) + +client, err := armsubscriptions.NewSubscriptionsClient(cred, nil) +handle(err) +``` + +`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential). + +## Acquire a token + +### `autorest/adal` + +```go +import "github.com/Azure/go-autorest/autorest/adal" + +oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID) +handle(err) + +spt, err := adal.NewServicePrincipalTokenWithSecret( + *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret}, +) + +err = spt.Refresh() +if err == nil { + token := spt.Token +} +``` + +### `azidentity` + +In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method. + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +tk, err := cred.GetToken( + context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}}, +) +if err == nil { + token := tk.Token +} +``` + +Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. 
For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). + +## Use azidentity credentials with older packages + +The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example: + +```go +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions" + "github.com/jongio/azidext/go/azidext" +) + +cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil) +handle(err) + +client := subscriptions.NewClient() +client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"}) +``` + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FMIGRATION.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md new file mode 100644 index 000000000000..2df42c813a5e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -0,0 +1,241 @@ +# Azure Identity Client Module for Go + +The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) +| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) + +# Getting started + +## Install the module + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure Identity module: + +```sh +go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 + +### Authenticating during local development + +When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development. + +#### Authenticating via the Azure CLI + +`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user +signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. + +When no default browser is available, `az login` will use the device code +authentication flow. This can also be selected manually by running `az login --use-device-code`. + +## Key concepts + +### Credentials + +A credential is a type which contains or can obtain the data needed for a +service client to authenticate requests. Service clients across the Azure SDK +accept a credential instance when they are constructed, and use that credential +to authenticate requests. + +The `azidentity` module focuses on OAuth authentication with Azure Active +Directory (AAD). 
It offers a variety of credential types capable of acquiring +an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. + +### DefaultAzureCredential + +`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: + +![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) + +1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. +2. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. +3. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. + +> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. + +## Managed Identity + +`DefaultAzureCredential` and `ManagedIdentityCredential` support +[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +in any hosting environment which supports managed identities, such as (this list is not exhaustive): +* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) + +## Examples + +- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential") +- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential") +- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential) + +### Authenticate with DefaultAzureCredential + +This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", cred, nil) +``` + +### Specify a user-assigned managed identity for DefaultAzureCredential + +To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID. + +### Define a custom authentication flow with `ChainedTokenCredential` + +`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. 
For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error. + +The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable. + +```go +managed, err := azidentity.NewManagedIdentityCredential(nil) +if err != nil { + // handle error +} +azCLI, err := azidentity.NewAzureCLICredential(nil) +if err != nil { + // handle error +} +chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil) +if err != nil { + // handle error +} + +client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) +``` + +## Credential Types + +### Authenticating Azure Hosted Applications + +|Credential|Usage +|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables +|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource + +### Authenticating Service Principals + +|Credential|Usage +|-|- +|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.2.0-beta.2#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion +|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate +|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret + +### Authenticating Users + +|Credential|Usage +|-|- +|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser +|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI +|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password + +### Authenticating via Development Tools + +|Credential|Usage +|-|- +|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI + +## Environment Variables + +`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. 
Each type of authentication requires values for specific variables: + +#### Service principal with secret + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_SECRET`|one of the application's client secrets + +#### Service principal with certificate + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key +|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any + +#### Username and password + +|variable name|value +|-|- +|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_USERNAME`|a username (usually an email address) +|`AZURE_PASSWORD`|that user's password + +Configuration is attempted in the above order. For example, if values for a +client secret and certificate are both present, the client secret will be used. + +## Troubleshooting + +### Error Handling + +Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). + +For more details on handling specific Azure Active Directory errors please refer to the +Azure Active Directory +[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). + +### Logging + +This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example: +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information. + +## Next steps + +Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues). + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. 
+ +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md new file mode 100644 index 000000000000..affa91d08748 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -0,0 +1,197 @@ +# Troubleshoot Azure Identity authentication issues + +This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors. + +## Table of contents + +- [Handle azidentity errors](#handle-azidentity-errors) + - [Permission issues](#permission-issues) +- [Find relevant information in errors](#find-relevant-information-in-errors) +- [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) +- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues) +- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) +- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) +- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues) +- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues) + - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity) + - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity) + - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity) +- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Get additional help](#get-additional-help) + +## Handle azidentity errors + +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. + +### Permission issues + +Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments. 
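+
+A minimal illustrative sketch (not part of the vendored module; `classifyAuthError` is a hypothetical helper) of how a caller could branch on these status codes by unwrapping the `*azcore.ResponseError` that SDK clients return:
+
+```go
+import (
+	"errors"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
+
+// classifyAuthError returns a short description of err: a 401/403 response
+// usually means the request was authenticated but lacks a required role,
+// while anything else points at the credential or transport itself.
+func classifyAuthError(err error) string {
+	var respErr *azcore.ResponseError
+	if errors.As(err, &respErr) {
+		switch respErr.StatusCode {
+		case http.StatusUnauthorized, http.StatusForbidden:
+			return "authenticated, but likely missing a required role assignment"
+		}
+	}
+	return "authentication or transport failure; inspect the credential error"
+}
+```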
+ +## Find relevant information in errors + +Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: + +``` +ClientSecretCredential authentication failed +POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token +-------------------------------------------------------------------------------- +RESPONSE 401 Unauthorized +-------------------------------------------------------------------------------- +{ + "error": "invalid_client", + "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z", + "error_codes": [ + 7000215 + ], + "timestamp": "2022-03-02 18:25:26Z", + "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100", + "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d", + "error_uri": "https://login.microsoftonline.com/error?code=7000215" +} +-------------------------------------------------------------------------------- +``` + +This error contains several pieces of information: + +- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. + +- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. + +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. + +### Enable and configure logging + +`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console. +```go +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + +// print log output to stdout +azlog.SetListener(func(event azlog.Event, s string) { + fmt.Println(s) +}) + +// include only azidentity credential logs +azlog.SetEvents(azidentity.EventAuthentication) +``` + +## Troubleshoot DefaultAzureCredential authentication issues + +| Error |Description| Mitigation | +|---|---|---| +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service-specific role rather than the subscription Owner role.
    | + +## Troubleshoot EnvironmentCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)| + + +## Troubleshoot ClientSecretCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot ClientCertificateCredential authentication issues + +| Error Code | Description | Mitigation | +|---|---|---| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| + + +## Troubleshoot UsernamePasswordCredential authentication issues + +| Error Code | Issue | Mitigation | +|---|---|---| +|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.| + + +## Troubleshoot ManagedIdentityCredential authentication issues + +`ManagedIdentityCredential` is designed to work on a variety of Azure hosts support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`. 
+ +|Host Environment| | | +|---|---|---| +|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| +|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| + +### Azure Virtual Machine managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

    If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
    • Refer to the error message for more details on specific failures.
    • Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
    • Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
    | + +#### Verify IMDS is available on the VM + +If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available. + +```sh +curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure App Service and Azure Functions managed identity + +| Error Message |Description| Mitigation | +|---|---|---| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
    • Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
    • Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
    | + +#### Verify the App Service managed identity endpoint is available + +If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`. + +```sh +curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER" +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +### Azure Kubernetes Service managed identity + +#### Pod Identity + +| Error Message |Description| Mitigation | +|---|---|---| +|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). + + +## Troubleshoot AzureCliCredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
    • Ensure the Azure CLI is installed as described in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
    • Verify that the Azure CLI's installation location is included in the application's `PATH` environment variable.
    | +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
    • Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
    • Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
    | + +#### Verify the Azure CLI can obtain tokens + +You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account. + +```azurecli +az account show +``` + +Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account. + +```azurecli +az account get-access-token --output json --resource https://management.core.windows.net +``` + +> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +## Get additional help + +Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go new file mode 100644 index 000000000000..60c3b9a1ec6d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -0,0 +1,165 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "os" + "regexp" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const ( + azureAuthorityHost = "AZURE_AUTHORITY_HOST" + azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD" + azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH" + azureClientID = "AZURE_CLIENT_ID" + azureClientSecret = "AZURE_CLIENT_SECRET" + azurePassword = "AZURE_PASSWORD" + azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME" + azureTenantID = "AZURE_TENANT_ID" + azureUsername = "AZURE_USERNAME" + + organizationsTenantID = "organizations" + developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" + defaultSuffix = "/.default" + tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" +) + +func getConfidentialClient(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidential.Client, error) { + if !validTenantID(tenantID) { + return confidential.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return confidential.Client{}, err + } + o := []confidential.Option{ + confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), + confidential.WithHTTPClient(newPipelineAdapter(co)), + } + o = append(o, additionalOpts...) + return confidential.New(clientID, cred, o...) 
+} + +func getPublicClient(clientID, tenantID string, co *azcore.ClientOptions) (public.Client, error) { + if !validTenantID(tenantID) { + return public.Client{}, errors.New(tenantIDValidationErr) + } + authorityHost, err := setAuthorityHost(co.Cloud) + if err != nil { + return public.Client{}, err + } + return public.New(clientID, + public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), + public.WithHTTPClient(newPipelineAdapter(co)), + ) +} + +// setAuthorityHost initializes the authority host for credentials. Precedence is: +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. default: Azure Public Cloud +func setAuthorityHost(cc cloud.Configuration) (string, error) { + host := cc.ActiveDirectoryAuthorityHost + if host == "" { + if len(cc.Services) > 0 { + return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud") + } + host = cloud.AzurePublic.ActiveDirectoryAuthorityHost + if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" { + host = envAuthorityHost + } + } + u, err := url.Parse(host) + if err != nil { + return "", err + } + if u.Scheme != "https" { + return "", errors.New("cannot use an authority host without https") + } + return host, nil +} + +// validTenantID return true is it receives a valid tenantID, returns false otherwise +func validTenantID(tenantID string) bool { + match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) + if err != nil { + return false + } + return match +} + +func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { + pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) + return pipelineAdapter{pl: pl} +} + +type pipelineAdapter struct { + pl runtime.Pipeline +} + +func (p pipelineAdapter) CloseIdleConnections() { + // do nothing +} + +func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { + req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) + if err != nil { + return nil, err + } + if r.Body != nil && r.Body != http.NoBody { + // create a rewindable body from the existing body as required + var body io.ReadSeekCloser + if rsc, ok := r.Body.(io.ReadSeekCloser); ok { + body = rsc + } else { + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + body = streaming.NopCloser(bytes.NewReader(b)) + } + err = req.SetBody(body, r.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + resp, err := p.pl.Do(req) + if err != nil { + return nil, err + } + return resp, err +} + +// enables fakes for test scenarios +type confidentialClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireTokenSilentOption) (confidential.AuthResult, error) + AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireTokenByAuthCodeOption) (confidential.AuthResult, error) + AcquireTokenByCredential(ctx context.Context, scopes []string) (confidential.AuthResult, error) +} + +// enables fakes for test scenarios +type publicClient interface { + AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireTokenSilentOption) (public.AuthResult, error) + AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (public.AuthResult, error) + AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (public.DeviceCode, error) + AcquireTokenByAuthCode(ctx context.Context, code 
string, redirectURI string, scopes []string, options ...public.AcquireTokenByAuthCodeOption) (public.AuthResult, error) + AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.InteractiveAuthOption) (public.AuthResult, error) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go new file mode 100644 index 000000000000..68f46d51a1ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -0,0 +1,189 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +const credNameAzureCLI = "AzureCLICredential" + +// used by tests to fake invoking the CLI +type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) + +// AzureCLICredentialOptions contains optional parameters for AzureCLICredential. +type AzureCLICredentialOptions struct { + // TenantID identifies the tenant the credential should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. + TenantID string + + tokenProvider azureCLITokenProvider +} + +// init returns an instance of AzureCLICredentialOptions initialized with default values. +func (o *AzureCLICredentialOptions) init() { + if o.tokenProvider == nil { + o.tokenProvider = defaultTokenProvider() + } +} + +// AzureCLICredential authenticates as the identity logged in to the Azure CLI. +type AzureCLICredential struct { + tokenProvider azureCLITokenProvider + tenantID string +} + +// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. +func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) { + cp := AzureCLICredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + return &AzureCLICredential{ + tokenProvider: cp.tokenProvider, + tenantID: cp.TenantID, + }, nil +} + +// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. +// This method is called automatically by Azure SDK clients. 
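As an illustrative aside (not part of the vendored file): AzureCLICredential defers to a locally signed-in Azure CLI, so it's most useful for local development. A minimal sketch of calling it directly, assuming a placeholder ARM scope:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Requires a prior `az login`; the credential shells out to the CLI on every call.
	cred, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		panic(err)
	}
	// AzureCLICredential accepts exactly one scope per request.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	// Print only the expiry; the token itself shouldn't be logged or shared.
	fmt.Println("token expires:", tk.ExpiresOn)
}
```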
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + // CLI expects an AAD v1 resource, not a v2 scope + scope := strings.TrimSuffix(opts.Scopes[0], defaultSuffix) + at, err := c.authenticate(ctx, scope) + if err != nil { + return azcore.AccessToken{}, err + } + logGetTokenSuccess(c, opts) + return at, nil +} + +const timeoutCLIRequest = 10 * time.Second + +func (c *AzureCLICredential) authenticate(ctx context.Context, resource string) (azcore.AccessToken, error) { + output, err := c.tokenProvider(ctx, resource, c.tenantID) + if err != nil { + return azcore.AccessToken{}, err + } + + return c.createAccessToken(output) +} + +func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + ctx, cancel := context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil + } +} + +func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + + tokenExpirationDate, err := parseExpirationDate(t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + } + + converted := azcore.AccessToken{ + Token: t.AccessToken, + ExpiresOn: *tokenExpirationDate, + } + return converted, nil +} + +// parseExpirationDate parses either a Azure CLI or CloudShell date into a time object +func parseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + 
expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr != nil { + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + return &expirationDate, nil + } + return &expirationDate, nil +} + +var _ azcore.TokenCredential = (*AzureCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go new file mode 100644 index 000000000000..86a89064569a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential. +type ChainedTokenCredentialOptions struct { + // RetrySources configures how the credential uses its sources. When true, the credential always attempts to + // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates + // only through this first successful source--it never again tries the sources which failed. + RetrySources bool +} + +// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, +// it tries all the credentials until one authenticates, after which it always uses that credential. +type ChainedTokenCredential struct { + cond *sync.Cond + iterating bool + name string + retrySources bool + sources []azcore.TokenCredential + successfulCredential azcore.TokenCredential +} + +// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults. +func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) { + if len(sources) == 0 { + return nil, errors.New("sources must contain at least one TokenCredential") + } + for _, source := range sources { + if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil + return nil, errors.New("sources cannot contain nil") + } + } + cp := make([]azcore.TokenCredential, len(sources)) + copy(cp, sources) + if options == nil { + options = &ChainedTokenCredentialOptions{} + } + return &ChainedTokenCredential{ + cond: sync.NewCond(&sync.Mutex{}), + name: "ChainedTokenCredential", + retrySources: options.RetrySources, + sources: cp, + }, nil +} + +// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token. +// This method is called automatically by Azure SDK clients. 
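An illustrative aside, not part of the vendored file: a sketch of composing a chain from credentials defined in this package, preferring managed identity when deployed and falling back to the Azure CLI locally:

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	managed, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		panic(err)
	}
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		panic(err)
	}
	chain, err := azidentity.NewChainedTokenCredential(
		[]azcore.TokenCredential{managed, cli},
		nil, // nil options: the chain remembers the first credential that succeeds
	)
	if err != nil {
		panic(err)
	}
	_ = chain // pass to any client that accepts an azcore.TokenCredential
}
```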
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if !c.retrySources { + // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential + c.cond.L.Lock() + for { + if c.successfulCredential != nil { + c.cond.L.Unlock() + return c.successfulCredential.GetToken(ctx, opts) + } + if !c.iterating { + c.iterating = true + // allow other goroutines to wait while this one iterates + c.cond.L.Unlock() + break + } + c.cond.Wait() + } + } + + var err error + var errs []error + var token azcore.AccessToken + var successfulCredential azcore.TokenCredential + for _, cred := range c.sources { + token, err = cred.GetToken(ctx, opts) + if err == nil { + log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred)) + successfulCredential = cred + break + } + errs = append(errs, err) + if _, ok := err.(*credentialUnavailableError); !ok { + break + } + } + if c.iterating { + c.cond.L.Lock() + c.successfulCredential = successfulCredential + c.iterating = false + c.cond.L.Unlock() + c.cond.Broadcast() + } + // err is the error returned by the last GetToken call. It will be nil when that call succeeds + if err != nil { + // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise + msg := createChainedErrorMessage(errs) + if _, ok := err.(*credentialUnavailableError); ok { + err = newCredentialUnavailableError(c.name, msg) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, msg, res) + } + } + return token, err +} + +func createChainedErrorMessage(errs []error) string { + msg := "failed to acquire a token.\nAttempted credentials:" + for _, err := range errs { + msg += fmt.Sprintf("\n\t%s", err.Error()) + } + return msg +} + +func extractCredentialName(credential azcore.TokenCredential) string { + return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.") +} + +var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml new file mode 100644 index 000000000000..3b443e8eedb2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -0,0 +1,47 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azidentity/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + RunLiveTests: true + ServiceDirectory: 'azidentity' + PreSteps: + - pwsh: | + [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream + Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS + [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream + env: + PFX_CONTENTS: $(net-identity-spcert-pfx) + PEM_CONTENTS: $(net-identity-spcert-pem) + SNI_CONTENTS: $(net-identity-spcert-sni) + EnvVars: + AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) + AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) + AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) + IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) + IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) + IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) + IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem + IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx + IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go new file mode 100644 index 000000000000..ffcf2094be20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -0,0 +1,74 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameAssertion = "ClientAssertionCredential" + +// ClientAssertionCredential authenticates an application with assertions provided by a callback function. +// This credential is for advanced scenarios. ClientCertificateCredential has a more convenient API for +// the most common assertion scenario, authenticating a service principal with a certificate. See +// [Azure AD documentation] for details of the assertion format. +// +// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +type ClientAssertionCredential struct { + client confidentialClient +} + +// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. +type ClientAssertionCredentialOptions struct { + azcore.ClientOptions +} + +// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. 
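An illustrative aside, not part of the vendored file: a sketch of wiring a ClientAssertionCredential. The tenant ID, client ID, and assertion file path are placeholders; how the signed assertion (a JWT) is produced is application-specific.

```go
package main

import (
	"context"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Placeholder values; a real application supplies its own tenant and client IDs.
	tenantID, clientID := "<tenant-id>", "<client-id>"
	cred, err := azidentity.NewClientAssertionCredential(tenantID, clientID,
		func(ctx context.Context) (string, error) {
			// The callback must be thread safe and return a signed JWT assertion.
			// For this sketch it is simply read from a hypothetical file path.
			b, err := os.ReadFile("/var/run/secrets/assertion.jwt")
			return string(b), err
		}, nil)
	if err != nil {
		panic(err)
	}
	_ = cred
}
```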
+func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion must be a function that returns assertions") + } + if options == nil { + options = &ClientAssertionCredentialOptions{} + } + cred := confidential.NewCredFromAssertionCallback( + func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }, + ) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) + if err != nil { + return nil, err + } + return &ClientAssertionCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameAssertion + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameAssertion, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go new file mode 100644 index 000000000000..a61d824ef59f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -0,0 +1,163 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "golang.org/x/crypto/pkcs12" +) + +const credNameCert = "ClientCertificateCredential" + +// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential. +type ClientCertificateCredentialOptions struct { + azcore.ClientOptions + + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c + // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. + // Defaults to False. + SendCertificateChain bool +} + +// ClientCertificateCredential authenticates a service principal with a certificate. +type ClientCertificateCredential struct { + client confidentialClient +} + +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. 
+func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { + if len(certs) == 0 { + return nil, errors.New("at least one certificate is required") + } + if options == nil { + options = &ClientCertificateCredentialOptions{} + } + cred, err := confidential.NewCredFromCertChain(certs, key) + if err != nil { + return nil, err + } + var o []confidential.Option + if options.SendCertificateChain { + o = append(o, confidential.WithX5C()) + } + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + if err != nil { + return nil, err + } + return &ClientCertificateCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameCert + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameCert, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. +// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. 
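An illustrative aside, not part of the vendored file: a sketch combining ParseCertificates with NewClientCertificateCredential. The certificate path, password, tenant ID, and client ID are placeholders.

```go
package main

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	data, err := os.ReadFile("/path/to/cert.pfx") // placeholder path; PEM or PKCS12
	if err != nil {
		panic(err)
	}
	// Pass nil instead of a password when the private key isn't encrypted.
	certs, key, err := azidentity.ParseCertificates(data, []byte("cert-password"))
	if err != nil {
		panic(err)
	}
	cred, err := azidentity.NewClientCertificateCredential("<tenant-id>", "<client-id>", certs, key, nil)
	if err != nil {
		panic(err)
	}
	_ = cred
}
```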
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) { + var blocks []*pem.Block + var err error + if len(password) == 0 { + blocks, err = loadPEMCert(certData) + } + if len(blocks) == 0 || err != nil { + blocks, err = loadPKCS12Cert(certData, string(password)) + } + if err != nil { + return nil, nil, err + } + var certs []*x509.Certificate + var pk crypto.PrivateKey + for _, block := range blocks { + switch block.Type { + case "CERTIFICATE": + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, err + } + certs = append(certs, c) + case "PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + } + if err != nil { + return nil, nil, err + } + case "RSA PRIVATE KEY": + if pk != nil { + return nil, nil, errors.New("certData contains multiple private keys") + } + pk, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, err + } + } + } + if len(certs) == 0 { + return nil, nil, errors.New("found no certificate") + } + if pk == nil { + return nil, nil, errors.New("found no private key") + } + return certs, pk, nil +} + +func loadPEMCert(certData []byte) ([]*pem.Block, error) { + blocks := []*pem.Block{} + for { + var block *pem.Block + block, certData = pem.Decode(certData) + if block == nil { + break + } + blocks = append(blocks, block) + } + if len(blocks) == 0 { + return nil, errors.New("didn't find any PEM blocks") + } + return blocks, nil +} + +func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) { + blocks, err := pkcs12.ToPEM(certData, password) + if err != nil { + return nil, err + } + if len(blocks) == 0 { + // not mentioning PKCS12 in this message because we end up here when certData is garbage + return nil, errors.New("didn't find any certificate content") + } + return blocks, err +} + +var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go new file mode 100644 index 000000000000..1c3a516601b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameSecret = "ClientSecretCredential" + +// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential. +type ClientSecretCredentialOptions struct { + azcore.ClientOptions +} + +// ClientSecretCredential authenticates an application with a client secret. +type ClientSecretCredential struct { + client confidentialClient +} + +// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. 
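An illustrative aside, not part of the vendored file: a minimal sketch of constructing a ClientSecretCredential, with placeholder IDs and the secret read from the environment rather than hard-coded.

```go
package main

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewClientSecretCredential(
		"<tenant-id>", "<client-id>", os.Getenv("AZURE_CLIENT_SECRET"), nil)
	if err != nil {
		panic(err)
	}
	_ = cred // pass to any client that accepts an azcore.TokenCredential
}
```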
+func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) { + if options == nil { + options = &ClientSecretCredentialOptions{} + } + cred, err := confidential.NewCredFromSecret(clientSecret) + if err != nil { + return nil, err + } + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions) + if err != nil { + return nil, err + } + return &ClientSecretCredential{client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameSecret + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameSecret, err) + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go new file mode 100644 index 000000000000..c2b801c4a6db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -0,0 +1,134 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. +// These options may not apply to all credentials in the chain. +type DefaultAzureCredentialOptions struct { + azcore.ClientOptions + + // TenantID identifies the tenant the Azure CLI should authenticate in. + // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + TenantID string +} + +// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. +// It combines credentials suitable for deployment with credentials suitable for local development. +// It attempts to authenticate with each of these credential types, in the following order, stopping when one provides a token: +// +// EnvironmentCredential +// ManagedIdentityCredential +// AzureCLICredential +// +// Consult the documentation for these credential types for more information on how they authenticate. +// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for +// every subsequent authentication. +type DefaultAzureCredential struct { + chain *ChainedTokenCredential +} + +// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults. 
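An illustrative aside, not part of the vendored file: the typical entry point for applications is the default chain described above, which needs no configuration beyond the environment it runs in.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Tries EnvironmentCredential, then ManagedIdentityCredential, then AzureCLICredential.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	_ = cred // pass to any Azure SDK client constructor that accepts an azcore.TokenCredential
}
```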
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) { + var creds []azcore.TokenCredential + var errorMessages []string + + if options == nil { + options = &DefaultAzureCredentialOptions{} + } + + envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ClientOptions: options.ClientOptions}) + if err == nil { + creds = append(creds, envCred) + } else { + errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) + } + + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} + if ID, ok := os.LookupEnv(azureClientID); ok { + o.ID = ClientID(ID) + } + msiCred, err := NewManagedIdentityCredential(o) + if err == nil { + creds = append(creds, msiCred) + msiCred.mic.imdsTimeout = time.Second + } else { + errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err}) + } + + cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{TenantID: options.TenantID}) + if err == nil { + creds = append(creds, cliCred) + } else { + errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) + } + + err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) + if err != nil { + return nil, err + } + + chain, err := NewChainedTokenCredential(creds, nil) + if err != nil { + return nil, err + } + chain.name = "DefaultAzureCredential" + return &DefaultAzureCredential{chain: chain}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.chain.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) + +func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { + errorMessage := strings.Join(errorMessages, "\n\t") + + if numberOfSuccessfulCredentials == 0 { + return errors.New(errorMessage) + } + + if len(errorMessages) != 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) + } + + return nil +} + +// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. +// Its GetToken method always returns a credentialUnavailableError having the same message as +// the error that prevented constructing the credential. 
This ensures the message is present +// in the error returned by ChainedTokenCredential.GetToken() +type defaultCredentialErrorReporter struct { + credType string + err error +} + +func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if _, ok := d.err.(*credentialUnavailableError); ok { + return azcore.AccessToken{}, d.err + } + return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error()) +} + +var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go new file mode 100644 index 000000000000..2e9b5438dbd0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameDeviceCode = "DeviceCodeCredential" + +// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential. +type DeviceCodeCredentialOptions struct { + azcore.ClientOptions + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant + // applications. + TenantID string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // UserPrompt controls how the credential presents authentication instructions. The credential calls + // this function with authentication details when it receives a device code. By default, the credential + // prints these details to stdout. + UserPrompt func(context.Context, DeviceCodeMessage) error +} + +func (o *DeviceCodeCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } + if o.UserPrompt == nil { + o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error { + fmt.Println(dc.Message) + return nil + } + } +} + +// DeviceCodeMessage contains the information a user needs to complete authentication. +type DeviceCodeMessage struct { + // UserCode is the user code returned by the service. + UserCode string `json:"user_code"` + // VerificationURL is the URL at which the user must authenticate. + VerificationURL string `json:"verification_uri"` + // Message is user instruction from Azure Active Directory. + Message string `json:"message"` +} + +// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the +// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// for authenticating a user in an environment without a web browser, such as an SSH session. +// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// automatically opens a browser to the login page. 
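An illustrative aside, not part of the vendored file: a sketch of overriding the default device-code prompt (which prints to stdout) so the sign-in instructions can be routed wherever the application surfaces them.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		UserPrompt: func(ctx context.Context, m azidentity.DeviceCodeMessage) error {
			// Forward the verification URL and user code to the user however the app prefers.
			fmt.Printf("visit %s and enter code %s\n", m.VerificationURL, m.UserCode)
			return nil
		},
	})
	if err != nil {
		panic(err)
	}
	_ = cred // the first GetToken call starts the device code flow and polls until the user signs in
}
```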
+type DeviceCodeCredential struct { + client publicClient + userPrompt func(context.Context, DeviceCodeMessage) error + account public.Account +} + +// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. +func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) { + cp := DeviceCodeCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) + if err != nil { + return nil, err + } + return &DeviceCodeCredential{userPrompt: cp.UserPrompt, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. +func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameDeviceCode + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err) + } + err = c.userPrompt(ctx, DeviceCodeMessage{ + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + Message: dc.Result.Message, + }) + if err != nil { + return azcore.AccessToken{}, err + } + ar, err = dc.AuthenticationResult(ctx) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go new file mode 100644 index 000000000000..b1871b4d4d74 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -0,0 +1,128 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" + +// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential +type EnvironmentCredentialOptions struct { + azcore.ClientOptions +} + +// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending +// on environment variable configuration. It reads configuration from these variables, in the following order: +// +// # Service principal with client secret +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. 
+// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_SECRET: one of the service principal's client secrets +// +// # Service principal with certificate +// +// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID. +// +// AZURE_CLIENT_ID: the service principal's client ID +// +// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key. +// +// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file. +// +// # User with username and password +// +// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". +// +// AZURE_CLIENT_ID: client ID of the application the user will authenticate to +// +// AZURE_USERNAME: a username (usually an email address) +// +// AZURE_PASSWORD: the user's password +type EnvironmentCredential struct { + cred azcore.TokenCredential +} + +// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options. +func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) { + if options == nil { + options = &EnvironmentCredentialOptions{} + } + tenantID := os.Getenv(azureTenantID) + if tenantID == "" { + return nil, errors.New("missing environment variable AZURE_TENANT_ID") + } + clientID := os.Getenv(azureClientID) + if clientID == "" { + return nil, errors.New("missing environment variable " + azureClientID) + } + if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential") + o := &ClientSecretCredentialOptions{ClientOptions: options.ClientOptions} + cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if certPath := os.Getenv(azureClientCertificatePath); certPath != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential") + certData, err := os.ReadFile(certPath) + if err != nil { + return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err) + } + var password []byte + if v := os.Getenv(azureClientCertificatePassword); v != "" { + password = []byte(v) + } + certs, key, err := ParseCertificates(certData, password) + if err != nil { + return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err) + } + o := &ClientCertificateCredentialOptions{ClientOptions: options.ClientOptions} + if v, ok := os.LookupEnv(envVarSendCertChain); ok { + o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" + } + cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + if username := os.Getenv(azureUsername); username != "" { + if password := os.Getenv(azurePassword); password != "" { + log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential") + o := &UsernamePasswordCredentialOptions{ClientOptions: options.ClientOptions} + cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) + if err != nil { + return nil, err + } + return &EnvironmentCredential{cred: cred}, nil + } + return nil, errors.New("no value for AZURE_PASSWORD") + } + return nil, errors.New("incomplete environment variable configuration. 
Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return c.cred.GetToken(ctx, opts) +} + +var _ azcore.TokenCredential = (*EnvironmentCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go new file mode 100644 index 000000000000..6695f1b70e4c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" +) + +// getResponseFromError retrieves the response carried by +// an AuthenticationFailedError or MSAL CallErr, if any +func getResponseFromError(err error) *http.Response { + var a *AuthenticationFailedError + var c msal.CallErr + var res *http.Response + if errors.As(err, &c) { + res = c.Resp + } else if errors.As(err, &a) { + res = a.RawResponse + } + return res +} + +// AuthenticationFailedError indicates an authentication request has failed. +type AuthenticationFailedError struct { + // RawResponse is the HTTP response motivating the error, if available. + RawResponse *http.Response + + credType string + message string +} + +func newAuthenticationFailedError(credType string, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +func newAuthenticationFailedErrorFromMSALError(credType string, err error) error { + res := getResponseFromError(err) + return newAuthenticationFailedError(credType, err.Error(), res) +} + +// Error implements the error interface. Note that the message contents are not contractual and can change over time. 
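An illustrative aside, not part of the vendored file: a sketch of how a caller might inspect an AuthenticationFailedError programmatically. The credential type and scope are placeholders; the error's formatted message already includes the response body and a troubleshooting link.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		var authErr *azidentity.AuthenticationFailedError
		if errors.As(err, &authErr) && authErr.RawResponse != nil {
			// RawResponse is available for programmatic inspection of the failed token request.
			fmt.Println("token request failed with status", authErr.RawResponse.StatusCode)
		}
	}
}
```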
+func (e *AuthenticationFailedError) Error() string { + if e.RawResponse == nil { + return e.credType + ": " + e.message + } + msg := &bytes.Buffer{} + fmt.Fprintf(msg, e.credType+" authentication failed\n") + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := io.ReadAll(e.RawResponse.Body) + e.RawResponse.Body.Close() + if err != nil { + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + } else { + fmt.Fprint(msg, "Response contained no body") + } + fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") + var anchor string + switch e.credType { + case credNameAzureCLI: + anchor = "azure-cli" + case credNameCert: + anchor = "client-cert" + case credNameSecret: + anchor = "client-secret" + case credNameManagedIdentity: + anchor = "managed-id" + case credNameUserPassword: + anchor = "username-password" + } + if anchor != "" { + fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor) + } + return msg.String() +} + +// NonRetriable indicates the request which provoked this error shouldn't be retried. +func (*AuthenticationFailedError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) + +// credentialUnavailableError indicates a credential can't attempt +// authentication because it lacks required data or state. +type credentialUnavailableError struct { + credType string + message string +} + +func newCredentialUnavailableError(credType, message string) error { + return &credentialUnavailableError{credType: credType, message: message} +} + +func (e *credentialUnavailableError) Error() string { + return e.credType + ": " + e.message +} + +// NonRetriable indicates that this error should not be retried. +func (e *credentialUnavailableError) NonRetriable() { + // marker method +} + +var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go new file mode 100644 index 000000000000..9032ae9886a5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameBrowser = "InteractiveBrowserCredentiall" + +// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential. 
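An illustrative aside, not part of the vendored file: with nil options the browser credential authenticates in the "organizations" tenant using the SDK's development client ID, so the minimal sketch is a one-liner.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewInteractiveBrowserCredential(nil)
	if err != nil {
		panic(err)
	}
	_ = cred // the first GetToken call opens the system browser to the sign-in page
}
```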
+type InteractiveBrowserCredentialOptions struct { + azcore.ClientOptions + + // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // "organizations" tenant, which can authenticate work and school accounts. + TenantID string + // ClientID is the ID of the application users will authenticate to. + // Defaults to the ID of an Azure development application. + ClientID string + // RedirectURL will be supported in a future version but presently doesn't work: https://github.com/Azure/azure-sdk-for-go/issues/15632. + // Applications which have "http://localhost" registered as a redirect URL need not set this option. + RedirectURL string +} + +func (o *InteractiveBrowserCredentialOptions) init() { + if o.TenantID == "" { + o.TenantID = organizationsTenantID + } + if o.ClientID == "" { + o.ClientID = developerSignOnClientID + } +} + +// InteractiveBrowserCredential opens a browser to interactively authenticate a user. +type InteractiveBrowserCredential struct { + client publicClient + options InteractiveBrowserCredentialOptions + account public.Account +} + +// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. +func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) { + cp := InteractiveBrowserCredentialOptions{} + if options != nil { + cp = *options + } + cp.init() + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions) + if err != nil { + return nil, err + } + return &InteractiveBrowserCredential{options: cp, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameBrowser + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + + o := []public.InteractiveAuthOption{} + if c.options.RedirectURL != "" { + o = append(o, public.WithRedirectURI(c.options.RedirectURL)) + } + ar, err = c.client.AcquireTokenInteractive(ctx, opts.Scopes, o...) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameBrowser, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go new file mode 100644 index 000000000000..569453e4622d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// EventAuthentication entries contain information about authentication. +// This includes information like the names of environment variables +// used when obtaining credentials and the type of credential used. +const EventAuthentication log.Event = "Authentication" + +func logGetTokenSuccess(cred azcore.TokenCredential, opts policy.TokenRequestOptions) { + if !log.Should(EventAuthentication) { + return + } + scope := strings.Join(opts.Scopes, ", ") + msg := fmt.Sprintf("%T.GetToken() acquired a token for scope %s\n", cred, scope) + log.Write(EventAuthentication, msg) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go new file mode 100644 index 000000000000..c9b72663c276 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -0,0 +1,401 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const ( + arcIMDSEndpoint = "IMDS_ENDPOINT" + identityEndpoint = "IDENTITY_ENDPOINT" + identityHeader = "IDENTITY_HEADER" + identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" + headerMetadata = "Metadata" + imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + msiEndpoint = "MSI_ENDPOINT" + imdsAPIVersion = "2018-02-01" + azureArcAPIVersion = "2019-08-15" + serviceFabricAPIVersion = "2019-07-01-preview" + + qpClientID = "client_id" + qpResID = "mi_res_id" +) + +type msiType int + +const ( + msiTypeAppService msiType = iota + msiTypeAzureArc + msiTypeCloudShell + msiTypeIMDS + msiTypeServiceFabric +) + +// managedIdentityClient provides the base for authenticating in managed identity environments +// This type includes an runtime.Pipeline and TokenCredentialOptions. 
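An illustrative aside, not part of the vendored file: the client below backs ManagedIdentityCredential, whose public constructor lives in another file of this package. A usage sketch, with the user-assigned client ID as a placeholder:

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// System-assigned identity: no options needed.
	sysCred, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		panic(err)
	}
	// User-assigned identity, selected by client ID.
	userCred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("<user-assigned-client-id>"),
	})
	if err != nil {
		panic(err)
	}
	_, _ = sysCred, userCred
}
```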
+type managedIdentityClient struct { + pipeline runtime.Pipeline + msiType msiType + endpoint string + id ManagedIDKind + imdsTimeout time.Duration +} + +type wrappedNumber json.Number + +func (n *wrappedNumber) UnmarshalJSON(b []byte) error { + c := string(b) + if c == "\"\"" { + return nil + } + return json.Unmarshal(b, (*json.Number)(n)) +} + +// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS +func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 5 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 1 * time.Minute + } + if o.RetryDelay == 0 { + o.RetryDelay = 2 * time.Second + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + // IMDS docs recommend retrying 404, 429 and all 5xx + // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + http.StatusNotFound, // 404 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, // 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 + } + } + if o.TryTimeout == 0 { + o.TryTimeout = 1 * time.Minute + } +} + +// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions +// that are passed into it along with a default pipeline. +// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that +// will be used to retrieve tokens and authenticate +func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + cp := options.ClientOptions + c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} + env := "IMDS" + if endpoint, ok := os.LookupEnv(identityEndpoint); ok { + if _, ok := os.LookupEnv(identityHeader); ok { + if _, ok := os.LookupEnv(identityServerThumbprint); ok { + env = "Service Fabric" + c.endpoint = endpoint + c.msiType = msiTypeServiceFabric + } else { + env = "App Service" + c.endpoint = endpoint + c.msiType = msiTypeAppService + } + } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { + env = "Azure Arc" + c.endpoint = endpoint + c.msiType = msiTypeAzureArc + } + } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { + env = "Cloud Shell" + c.endpoint = endpoint + c.msiType = msiTypeCloudShell + } else { + setIMDSRetryOptionDefaults(&cp.Retry) + } + c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + if log.Should(EventAuthentication) { + log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + } + + return &c, nil +} + +// provideToken acquires a token for MSAL's confidential.Client, which caches the token +func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { + result := confidential.TokenProviderResult{} + tk, err := c.authenticate(ctx, c.id, params.Scopes) + if err == nil { + result.AccessToken = tk.Token + result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) + } + return result, err +} + +// authenticate 
acquires an access token +func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + var cancel context.CancelFunc + if c.imdsTimeout > 0 && c.msiType == msiTypeIMDS { + ctx, cancel = context.WithTimeout(ctx, c.imdsTimeout) + defer cancel() + } + + msg, err := c.createAuthRequest(ctx, id, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + + resp, err := c.pipeline.Do(msg) + if err != nil { + if cancel != nil && errors.Is(err, context.DeadlineExceeded) { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "IMDS token request timed out") + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil) + } + + // got a response, remove the IMDS timeout so future requests use the transport's configuration + c.imdsTimeout = 0 + + if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return c.createAccessToken(resp) + } + + if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") + } + + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp) +} + +func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { + value := struct { + // these are the only fields that we use + Token string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid + ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string + }{} + if err := runtime.UnmarshalAsJSON(res, &value); err != nil { + return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + } + if value.ExpiresIn != "" { + expiresIn, err := json.Number(value.ExpiresIn).Int64() + if err != nil { + return azcore.AccessToken{}, err + } + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil + } + switch v := value.ExpiresOn.(type) { + case float64: + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil + case string: + if expiresOn, err := strconv.Atoi(v); err == nil { + return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil + } + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res) + default: + msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res) + } +} + +func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + switch c.msiType { + case msiTypeIMDS: + return c.createIMDSAuthRequest(ctx, id, scopes) + case msiTypeAppService: + return c.createAppServiceAuthRequest(ctx, id, scopes) + case msiTypeAzureArc: + // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service + key, err 
:= c.getAzureArcSecretKey(ctx, scopes) + if err != nil { + msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) + return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil) + } + return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeServiceFabric: + return c.createServiceFabricAuthRequest(ctx, id, scopes) + case msiTypeCloudShell: + return c.createCloudShellAuthRequest(ctx, id, scopes) + default: + return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") + } +} + +func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", imdsAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) + q := request.Raw().URL.Query() + q.Add("api-version", "2019-08-01") + q.Add("resource", scopes[0]) + if id != nil { + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + q := request.Raw().URL.Query() + request.Raw().Header.Set("Accept", "application/json") + request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) + q.Add("api-version", serviceFabricAPIVersion) + q.Add("resource", strings.Join(scopes, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { + // create the request to retreive the secret key challenge provided by the HIMDS service + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return "", err + } + request.Raw().Header.Set(headerMetadata, "true") + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + request.Raw().URL.RawQuery = q.Encode() + // send the initial request to get the short-lived secret key + response, err := c.pipeline.Do(request) + if err != nil { + return "", err + } + // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location + // of the secret key file. Any other status code indicates an error in the request. 
+ if response.StatusCode != 401 { + msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) + return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response) + } + header := response.Header.Get("WWW-Authenticate") + if len(header) == 0 { + return "", errors.New("did not receive a value from WWW-Authenticate header") + } + // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key + pos := strings.LastIndex(header, "=") + if pos == -1 { + return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + } + key, err := os.ReadFile(header[pos+1:]) + if err != nil { + return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + } + return string(key), nil +} + +func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + q := request.Raw().URL.Query() + q.Add("api-version", azureArcAPIVersion) + q.Add("resource", strings.Join(resources, " ")) + if id != nil { + log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + +func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set(headerMetadata, "true") + data := url.Values{} + data.Set("resource", strings.Join(scopes, " ")) + dataEncoded := data.Encode() + body := streaming.NopCloser(strings.NewReader(dataEncoded)) + if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } + if id != nil { + log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities") + q := request.Raw().URL.Query() + if id.idKind() == miResourceID { + q.Add(qpResID, id.String()) + } else { + q.Add(qpClientID, id.String()) + } + } + return request, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go new file mode 100644 index 000000000000..18078171ee81 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -0,0 +1,125 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
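The IMDS branch of the client above boils down to a plain HTTP GET with a mandatory `Metadata: true` header against the link-local endpoint. A standalone sketch of that exchange, runnable only from an Azure host with a managed identity assigned (the resource value is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The same request createIMDSAuthRequest builds above.
	req, err := http.NewRequest(http.MethodGet,
		"http://169.254.169.254/metadata/identity/oauth2/token", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Metadata", "true") // required by IMDS
	q := req.URL.Query()
	q.Add("api-version", "2018-02-01")
	q.Add("resource", "https://storage.azure.com") // an AADv1 resource, not a v2 scope
	req.URL.RawQuery = q.Encode()

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // only succeeds on an Azure host that exposes IMDS
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON containing access_token and expires_in / expires_on
}
```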
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +const credNameManagedIdentity = "ManagedIdentityCredential" + +type managedIdentityIDKind int + +const ( + miClientID managedIdentityIDKind = 0 + miResourceID managedIdentityIDKind = 1 +) + +// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID +type ManagedIDKind interface { + fmt.Stringer + idKind() managedIdentityIDKind +} + +// ClientID is the client ID of a user-assigned managed identity. +type ClientID string + +func (ClientID) idKind() managedIdentityIDKind { + return miClientID +} + +// String returns the string value of the ID. +func (c ClientID) String() string { + return string(c) +} + +// ResourceID is the resource ID of a user-assigned managed identity. +type ResourceID string + +func (ResourceID) idKind() managedIdentityIDKind { + return miResourceID +} + +// String returns the string value of the ID. +func (r ResourceID) String() string { + return string(r) +} + +// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential. +type ManagedIdentityCredentialOptions struct { + azcore.ClientOptions + + // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity + // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that + // some platforms don't accept resource IDs. + ID ManagedIDKind +} + +// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a +// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: +// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +type ManagedIdentityCredential struct { + client confidentialClient + mic *managedIdentityClient +} + +// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. +func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) { + if options == nil { + options = &ManagedIdentityCredentialOptions{} + } + mic, err := newManagedIdentityClient(options) + if err != nil { + return nil, err + } + cred := confidential.NewCredFromTokenProvider(mic.provideToken) + if err != nil { + return nil, err + } + // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. + // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. + clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" + if options.ID != nil { + clientID = options.ID.String() + } + c, err := confidential.New(clientID, cred) + if err != nil { + return nil, err + } + return &ManagedIdentityCredential{client: c, mic: mic}, nil +} + +// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. 
+func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) != 1 { + err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + return azcore.AccessToken{}, err + } + // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + scopes := []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} + ar, err := c.client.AcquireTokenSilent(ctx, scopes) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, nil + } + ar, err = c.client.AcquireTokenByCredential(ctx, scopes) + if err != nil { + return azcore.AccessToken{}, err + } + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go new file mode 100644 index 000000000000..2ab248c3c616 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -0,0 +1,68 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +const credNameUserPassword = "UsernamePasswordCredential" + +// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +type UsernamePasswordCredentialOptions struct { + azcore.ClientOptions +} + +// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, +// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible +// with any form of multi-factor authentication, and the application must already have user or admin consent. +// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +type UsernamePasswordCredential struct { + client publicClient + username string + password string + account public.Account +} + +// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user +// will authenticate to. Pass nil for options to accept defaults. +func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) { + if options == nil { + options = &UsernamePasswordCredentialOptions{} + } + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions) + if err != nil { + return nil, err + } + return &UsernamePasswordCredential{username: username, password: password, client: c}, nil +} + +// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
+func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(opts.Scopes) == 0 { + return azcore.AccessToken{}, errors.New(credNameUserPassword + ": GetToken() requires at least one scope") + } + ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account)) + if err == nil { + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + } + ar, err = c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password) + if err != nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameUserPassword, err) + } + c.account = ar.Account + logGetTokenSuccess(c, opts) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go new file mode 100644 index 000000000000..9757589d166e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -0,0 +1,15 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +const ( + // UserAgent is the string to be used in the user agent string when making requests. + component = "azidentity" + + // Version is the semantic version (see http://semver.org) of this module. + version = "v1.2.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 000000000000..48ea6616b5b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 000000000000..245af7d2bec4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
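As a hedged illustration of the ManagedIdentityCredential constructors defined above, including the ClientID and ResourceID kinds (both IDs below are placeholders, not real identities):

```go
package main

import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"

func main() {
	// System-assigned identity: the default when options are nil.
	sysCred, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		panic(err)
	}
	_ = sysCred

	// User-assigned identity selected by client ID. ResourceID works the same way,
	// but, as noted above, some hosting platforms don't accept resource IDs.
	userCred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"), // placeholder GUID
	})
	if err != nil {
		panic(err)
	}
	_ = userCred
}
```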
+ +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. +func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 000000000000..66bf13e5f04b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 000000000000..8c6eacb618a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 000000000000..ade7b348e303 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 000000000000..d7876d297ae9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 000000000000..4f1dcf1b78a6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. 
+type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 000000000000..238ef42ed03a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,123 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. +type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. 
+ const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired := time.Now(), false, false + + // acquire exclusive lock + er.cond.L.Lock() + resource := er.resource + + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. + er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 000000000000..a3824bee8b5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
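temporal.Resource is internal to the Azure SDK module (a `package main` outside that module cannot import it), so the sketch below is shown only to illustrate the refresh pattern: the acquire callback returns a value plus its expiration, and Get re-invokes it only when the cached value is missing, expired, or inside the eager-refresh window handled above.

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)

func main() {
	// The acquire callback returns a fresh value and its expiration.
	acquire := func(state string) (string, time.Time, error) {
		return "token-for-" + state, time.Now().Add(30 * time.Minute), nil
	}
	res := temporal.NewResource[string, string](acquire)

	// Get refreshes only when the cached value is missing, expired,
	// or close enough to expiring to warrant an eager update.
	tok, err := res.Get("scope-A")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok)

	res.Expire() // force the next Get to call acquire again
}
```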
+ +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 000000000000..278ac9cd1c2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. +func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md new file mode 100644 index 000000000000..2c2008679be8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -0,0 +1,87 @@ +# Release History + +## 0.5.1 (2022-10-11) + +### Bugs Fixed + +* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string +* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'. + +### Other Changes + +* Improved docs for client constructors. +* Updating azcore version to 1.1.4 + +## 0.5.0 (2022-09-29) + +### Breaking Changes + +* Complete architectural change for better user experience. 
Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) + +### Features Added + +* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977) +* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container). + +### Bugs Fixed + +* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767) +* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937) + +## 0.4.1 (2022-05-12) + +### Other Changes + +* Updated to latest `azcore` and `internal` modules + +## 0.4.0 (2022-04-19) + +### Breaking Changes + +* Fixed Issue #17150 : Renaming/refactoring high level methods. +* Fixed Issue #16972 : Constructors should return clients by reference. +* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags + remains the same. + +### Bugs Fixed + +* Fixed Issue #17515 : SetTags options bag missing leaseID. +* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. +* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. +* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods + ignoring the options bag. +* Fixed Issue #16920 : Fixing error handling example. +* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. +* Fixed Issue #16679 : Response parsing issue in List blobs API. + +## 0.3.0 (2022-02-09) + +### Breaking Changes + +* Updated to latest `azcore`. Public surface area is unchanged. +* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is + now `*RetryReaderOptions`. + +### Bugs Fixed + +* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource. +* Fixed Issue #16223 : `HttpRange` does not expose its fields. +* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient` +* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()` +* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment +* Fixed Issue #16679 : Unable to access Metadata when listing blobs +* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission. +* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError` + +## 0.2.0 (2021-11-03) + +### Breaking Changes + +* Clients now have one constructor per authentication method + +## 0.1.0 (2021-09-13) + +### Features Added + +* This is the initial preview release of the `azblob` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md new file mode 100644 index 000000000000..467fe36cd90b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -0,0 +1,274 @@ +# Azure Blob Storage SDK for Go + +> Server Version: 2020-10-02 + +Azure Blob storage is Microsoft's object storage solution for the cloud. Blob +storage is optimized for storing massive amounts of unstructured data. +Unstructured data is data that does not adhere to a particular data model or +definition, such as text or binary data. + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] + +## Getting started + +### Install the package + +Install the Azure Blob Storage SDK for Go with [go get][goget]: + +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +### Prerequisites + +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). + +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. + +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: + +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Authenticate the client + +In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. 
+ +```go +// create a credential for authenticating with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle err + +// create an azblob.Client for the specified storage account that uses the above credential +client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil) +// TODO: handle err +``` + +Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). + +## Key concepts + +Blob storage is designed for: + +- Serving images or documents directly to a browser. +- Storing files for distributed access. +- Streaming video and audio. +- Writing to log files. +- Storing data for backup and restore, disaster recovery, and archiving. +- Storing data for analysis by an on-premises or Azure-hosted service. + +Blob storage offers three types of resources: + +- The _storage account_ +- One or more _containers_ in a storage account +- One ore more _blobs_ in a container + +Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. +The storage account is specified when the `azblob.Client` is constructed. +Use the appropriate client constructor function for the authentication mechanism you wish to use. + +Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) + +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. + +### About blob metadata +Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. 
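For example, here is a sketch of attaching metadata at upload time. It assumes the v0.5.x options surface where `Metadata` is a `map[string]string` (newer releases use `map[string]*string`), and it reuses `client`, `containerName`, and `blobName` from the snippets above; `data` is an assumed `[]byte` payload.

```go
// non-ASCII metadata values should be Base64- or URL-encoded before upload
value := "résumé.pdf"
encoded := base64.StdEncoding.EncodeToString([]byte(value))

_, err = client.UploadBuffer(context.TODO(), containerName, blobName, data, &azblob.UploadBufferOptions{
	Metadata: map[string]string{
		"category":     "sample", // names must be valid, ASCII-only HTTP header names
		"originalname": encoded,
	},
})
// TODO: handle error
```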
+ +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + + +## Examples + +### Uploading a blob + +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" + blobName = "sample-blob" + sampleFile = "path/to/sample/file" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// upload the file to the specified container with the specified blob name +_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) +// TODO: handle error +``` + +### Downloading a blob + +```go +// this example accesses a public blob via anonymous access, so no credentials are required +client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil) +// TODO: handle error + +// create or open a local file where we can download the blob +file, err := os.Create("cloud.jpg") +// TODO: handle error +defer file.Close() + +// download the blob +_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) +// TODO: handle error +``` + +### Enumerating blobs + +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// blob listings are returned across multiple pages +pager := client.NewListBlobsFlatPager(containerName, nil) + +// continue fetching pages until no more remain +for pager.More() { + // advance to the next page + page, err := pager.NextPage(context.TODO()) + // TODO: handle error + + // print the blob names for this page + for _, blob := range page.Segment.BlobItems { + fmt.Println(*blob.Name) + } +} +``` + +## Troubleshooting + +All Blob service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [bloberror][blob_error] package provides the possible Storage error codes +along with various helper facilities for error handling. 
+ +```go +const ( + connectionString = "" + containerName = "sample-container" +) + +// create a client with the provided connection string +client, err := azblob.NewClientFromConnectionString(connectionString, nil) +// TODO: handle error + +// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion +_, err = client.DeleteContainer(context.TODO(), containerName, nil) + +if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) { + // ignore any errors if the container is being deleted or already has been deleted +} else if err != nil { + // TODO: some other error +} +``` + +## Next steps + +Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. + +### Specialized clients + +The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. +Use these clients when you need to interact with a specific kind of blob. +Learn more about the various types of blobs from the following links. + +- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) +- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) +- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) + +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. + +The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. + +The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more. + +The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. +See the package's documentation for more information. + +## Contributing + +See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, +testing, and contributing to this library. + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. 
+ +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png) + + +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://docs.microsoft.com/cli/azure +[azure_sub]: https://azure.microsoft.com/free/ +[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go +[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go +[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go +[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go +[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go +[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go +[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease +[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go +[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas +[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go +[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go new file mode 100644 index 000000000000..a6c204ac6618 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -0,0 +1,276 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
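To connect the specialized-client list in the README above with the appendblob package that follows, here is a hypothetical end-to-end sketch using the constructors plus the Create and AppendBlock methods defined in this file. The blob URL is a placeholder, and AppendBlock's v0.5.x signature (an io.ReadSeekCloser body plus nil options) is assumed.

```go
package main

import (
	"context"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// placeholder blob URL
	client, err := appendblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/logs/app.log", cred, nil)
	if err != nil {
		panic(err)
	}
	// Create the 0-size append blob, then append a block of data to it.
	if _, err = client.Create(context.TODO(), nil); err != nil {
		panic(err)
	}
	body := streaming.NopCloser(strings.NewReader("hello append blob\n"))
	if _, err = client.AppendBlock(context.TODO(), body, nil); err != nil {
		panic(err)
	}
}
```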
+ +package appendblob + +import ( + "context" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage append blob; +type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +// BlobClient returns the embedded blob client for this AppendBlob client. +func (ab *Client) BlobClient() *blob.Client { + innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return (*blob.Client)(innerBlob) +} + +func (ab *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) +} + +func (ab *Client) generated() *generated.AppendBlobClient { + _, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) + return appendBlob +} + +// URL returns the URL endpoint used by the Client object. +func (ab *Client) URL() string { + return ab.generated().Endpoint() +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (ab *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(ab.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil +} + +// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + resp, err := ab.generated().Create(ctx, 0, opts, httpHeaders, leaseAccessConditions, cpkInfo, + cpkScopeInfo, modifiedAccessConditions) + return resp, err +} + +// AppendBlock writes a stream to a new block of data to the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. 
+
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
+func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		// propagate the validation error instead of swallowing it
+		return AppendBlockResponse{}, err
+	}
+
+	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format()
+
+	resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
+func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) {
+	appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format()
+
+	// content length should be 0 on * from URL. always. It's a 400 if it isn't.
+	resp, err := ab.generated().AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo,
+		leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+	return resp, err
+}
+
+// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal
+func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) {
+	leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format()
+	resp, err := ab.generated().Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions)
+	return resp, err
+}
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
+	return ab.BlobClient().Delete(ctx, o)
+}
+
+// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
+func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
+	return ab.BlobClient().Undelete(ctx, o)
+}
+
+// SetTier operation sets the tier on a blob. The operation is allowed on a page
+// blob in a premium storage account and on a block blob in a blob storage account (locally
+// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
+// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
+// does not update the blob's ETag.
+// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
+func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return ab.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return ab.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return ab.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return ab.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return ab.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return ab.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return ab.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
+func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return ab.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return ab.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return ab.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go new file mode 100644 index 000000000000..69faf6ad6546 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method. +type AppendPositionAccessConditions = generated.AppendPositionAccessConditions + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions provides set of configurations for Create Append Blob operation +type CreateOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + AccessConditions *blob.AccessConditions + + HTTPHeaders *blob.HTTPHeaders + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. 
If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string +} + +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := generated.AppendBlobClientCreateOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. +type AppendBlockOptions struct { + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + AccessConditions *blob.AccessConditions +} + +func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.AppendBlobClientAppendBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. +type AppendBlockFromURLOptions struct { + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions + + // Range specifies a range of bytes. The default value is all bytes. 
+ Range blob.HTTPRange +} + +func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, + *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &generated.AppendBlobClientAppendBlockFromURLOptions{ + SourceRange: exported.FormatHTTPRange(o.Range), + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SealOptions provides set of configurations for SealAppendBlob operation +type SealOptions struct { + AccessConditions *blob.AccessConditions + AppendPositionAccessConditions *AppendPositionAccessConditions +} + +func (o *SealOptions) format() (*generated.LeaseAccessConditions, + *generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions + +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go new file mode 100644 index 000000000000..e9ab4a3ccdcc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go @@ -0,0 +1,23 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package appendblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.AppendBlobClientCreateResponse + +// AppendBlockResponse contains the response from method Client.AppendBlock. +type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse + +// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL. +type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse + +// SealResponse contains the response from method Client.Seal. +type SealResponse = generated.AppendBlobClientSealResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go new file mode 100644 index 000000000000..1b7cf6d865af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -0,0 +1,423 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package blob + +import ( + "context" + "errors" + "io" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type Client base.Client[generated.BlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (b *Client) generated() *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.BlobClient])(b)) +} + +// URL returns the URL endpoint used by the Client object. +func (b *Client) URL() string { + return b.generated().Endpoint() +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (b *Client) WithVersionID(versionID string) (*Client, error) { + p, err := ParseURL(b.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) { + deleteOptions, leaseInfo, accessConditions := o.format() + resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions) + return resp, err +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) { + undeleteOptions := o.format() + resp, err := b.generated().Undelete(ctx, undeleteOptions) + return resp, err +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. 
This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format() + resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + return resp, err +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) { + basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} + leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() + resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + return resp, err +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions) + + return resp, err +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
+func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { + opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions) + return resp, err +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) { + serializedTags := shared.SerializeBlobTags(tags) + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err + +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { + copyOptions, smac, mac, lac := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) { + if b.sharedKey() == nil { + return "", errors.New("credential is not a SharedKeyCredential. 
SAS can only be signed with a SharedKeyCredential") + } + + urlParts, err := ParseURL(b.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + + qps, err := sas.BlobSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: sas.Version, + + Permissions: permissions.String(), + + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(b.sharedKey()) + + if err != nil { + return "", err + } + + endpoint := b.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// download downloads an Azure blob to a WriterAt in parallel. +func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return 0, err + } + count = *dr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(chunkStart int64, count int64, ctx context.Context) error { + + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: count, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return err + } + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return 0, err + } + return count, nil +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
+func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format() + if o == nil { + o = &DownloadStreamOptions{} + } + + dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return DownloadStreamResponse{}, err + } + + return DownloadStreamResponse{ + client: b, + BlobClientDownloadResponse: dr, + getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + cpkInfo: o.CpkInfo, + cpkScope: o.CpkScopeInfo, + }, err +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { + if o == nil { + o = &DownloadBufferOptions{} + } + return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { + if o == nil { + o = &DownloadFileOptions{} + } + do := (*downloadOptions)(o) + + // 1. Calculate the size of the destination file + var size int64 + + count := do.Range.Count + if count == CountToEnd { + // Try to get Azure blob's size + getBlobPropertiesOptions := do.getBlobPropertiesOptions() + props, err := b.GetProperties(ctx, getBlobPropertiesOptions) + if err != nil { + return 0, err + } + size = *props.ContentLength - do.Range.Offset + } else { + size = count + } + + // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. + stat, err := file.Stat() + if err != nil { + return 0, err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return 0, err + } + } + + if size > 0 { + return b.download(ctx, file, *do) + } else { // if the blob's size is 0, there is no need in downloading it + return 0, nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go new file mode 100644 index 000000000000..d0e5d7d825cc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -0,0 +1,241 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + CountToEnd = 0 + + SnapshotTimeFormat = exported.SnapshotTimeFormat + + // DefaultDownloadBlockSize is default block size + DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB +) + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. 
+func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} + +// AccessTier defines values for Blob Access Tier +type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type. 
+type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return generated.PossibleImmutabilityPolicySettingValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType +type EncryptionAlgorithmType = generated.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return generated.PossibleEncryptionAlgorithmTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// DeleteType defines values for DeleteType +type DeleteType = generated.DeleteType + +const ( + DeleteTypeNone DeleteType = generated.DeleteTypeNone + DeleteTypePermanent DeleteType = generated.DeleteTypePermanent +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return generated.PossibleDeleteTypeValues() +} + +// ExpiryOptions defines values for ExpiryOptions +type ExpiryOptions = generated.ExpiryOptions + +const ( + ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute + ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire + ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation + ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return generated.PossibleExpiryOptionsValues() +} + +// QueryFormatType - The quick query format type. 
+type QueryFormatType = generated.QueryFormatType + +const ( + QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited + QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON + QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow + QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return generated.PossibleQueryFormatTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go new file mode 100644 index 000000000000..86af620c3574 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -0,0 +1,492 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. 
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Type Declarations --------------------------------------------------------------------- + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// CpkInfo contains a group of parameters for client provided encryption key. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for client provided encryption scope. +type CpkScopeInfo = generated.CpkScopeInfo + +// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type HTTPHeaders = generated.BlobHTTPHeaders + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// Tags represent map of blob index tags +type Tags = generated.BlobTag + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.Download method. +type DownloadStreamOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientDownloadOptions{ + RangeGetContentMD5: o.RangeGetContentMD5, + Range: exported.FormatHTTPRange(o.Range), + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions. +type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. 
+ Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + } +} + +func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions { + if o == nil { + return nil + } + return &DownloadStreamOptions{ + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + Range: rnge, + RangeGetContentMD5: rangeGetContentMD5, + } +} + +// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // CpkInfo contains a group of parameters for client provided encryption key. + CpkInfo *CpkInfo + + // CpkScopeInfo contains a group of parameters for client provided encryption scope. + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// DownloadFileOptions contains the optional parameters for the DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. 
+type DeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself + DeleteSnapshots *DeleteSnapshotsOptionType + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + } + + if o.AccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UndeleteOptions contains the optional parameters for the Client.Undelete method. +type UndeleteOptions struct { + // placeholder for future options +} + +func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTierOptions contains the optional parameters for the Client.SetTier method. +type SetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + AccessConditions *AccessConditions +} + +func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CpkInfo *CpkInfo +} + +func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. 
+type SetHTTPHeadersOptions struct { + AccessConditions *AccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions provides set of configurations for Set Metadata on blob operation +type SetMetadataOptions struct { + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo, + *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + Metadata map[string]string + AccessConditions *AccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + + return &generated.BlobClientCreateSnapshotOptions{ + Metadata: o.Metadata, + }, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Overrides the sealed state of the destination blob. 
Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + AccessConditions *AccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions, + *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientStartCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method. +type AbortCopyFromURLOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTagsOptions contains the optional parameters for the Client.SetTags method. +type SetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AccessConditions *AccessConditions +} + +func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientSetTagsOptions{ + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetTagsOptions contains the optional parameters for the Client.GetTags method. +type GetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
+ VersionID *string + + BlobAccessConditions *AccessConditions +} + +func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. +type CopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *AccessConditions +} + +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &generated.BlobClientCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go new file mode 100644 index 000000000000..c13d332d7197 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// DownloadStreamResponse contains the response from the DownloadStream method. +// To read from the stream, read from the Body field, or call the NewRetryReader method. +type DownloadStreamResponse struct { + generated.BlobClientDownloadResponse + ObjectReplicationRules []ObjectReplicationPolicy + + client *Client + getInfo httpGetterInfo + cpkInfo *CpkInfo + cpkScope *CpkScopeInfo +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DowloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + accessConditions := &AccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, + } + options := DownloadStreamOptions{ + Range: getInfo.Range, + AccessConditions: accessConditions, + CpkInfo: r.cpkInfo, + CpkScopeInfo: r.cpkScope, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// DeleteResponse contains the response from method BlobClient.Delete. +type DeleteResponse = generated.BlobClientDeleteResponse + +// UndeleteResponse contains the response from method BlobClient.Undelete. +type UndeleteResponse = generated.BlobClientUndeleteResponse + +// SetTierResponse contains the response from method BlobClient.SetTier. +type SetTierResponse = generated.BlobClientSetTierResponse + +// GetPropertiesResponse contains the response from method BlobClient.GetProperties. +type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse + +// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse + +// SetMetadataResponse contains the response from method BlobClient.SetMetadata. +type SetMetadataResponse = generated.BlobClientSetMetadataResponse + +// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse + +// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse + +// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse + +// SetTagsResponse contains the response from method BlobClient.SetTags. +type SetTagsResponse = generated.BlobClientSetTagsResponse + +// GetTagsResponse contains the response from method BlobClient.GetTags. +type GetTagsResponse = generated.BlobClientGetTagsResponse + +// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. 
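Callers are expected to wrap the response body in the retry reader rather than reading resp.Body directly when resilience to transient read failures matters. A short sketch, again assuming an already-constructed *blob.Client; the retry count and logging callback are illustrative values.

package blobexample

import (
	"context"
	"io"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// downloadWithRetries reads the whole blob through a RetryReader so that
// transient network errors during the body read are retried transparently.
func downloadWithRetries(ctx context.Context, client *blob.Client) ([]byte, error) {
	resp, err := client.DownloadStream(ctx, nil)
	if err != nil {
		return nil, err
	}

	reader := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{
		MaxRetries: 5,
		OnFailedRead: func(failures int32, lastErr error, rnge blob.HTTPRange, willRetry bool) {
			log.Printf("read failure %d (retrying=%v): %v", failures, willRetry, lastErr)
		},
	})
	defer reader.Close()

	return io.ReadAll(reader)
}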
+type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse + +// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse + +// BreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse + +// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse + +// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse + +// RenewLeaseResponse contains the response from method BlobClient.RenewLease. +type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go new file mode 100644 index 000000000000..cc1b1365d7c6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -0,0 +1,192 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + "net" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) + +// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. +type httpGetterInfo struct { + Range HTTPRange + + // ETag specifies the resource's etag that should be used when creating + // the HTTP GET request's If-Match header + ETag *azcore.ETag +} + +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryReaderOptions struct { + // MaxRetries specifies the maximum number of attempts a failed read will be retried + // before producing an error. + // The default value is three. + MaxRetries int32 + + // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. + OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) + + // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, + // retryReader has the following special behaviour: closing the response body before it is all read is treated as a + // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = + // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If + // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead + // treated as a fatal (non-retryable) error. + // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens + // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors + // which will be retried. 
+ // The default value is false. + EarlyCloseAsError bool + + doInjectError bool + doInjectErrorRound int32 + injectedError error +} + +// RetryReader attempts to read from response, and if there is retriable network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +// RetryReader implements the io.ReadCloser interface. +type RetryReader struct { + ctx context.Context + info httpGetterInfo + retryReaderOptions RetryReaderOptions + getter httpGetter + countWasBounded bool + + // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response + responseMu *sync.Mutex + response io.ReadCloser +} + +// newRetryReader creates a retry reader. +func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } +} + +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { + s.responseMu.Lock() + defer s.responseMu.Unlock() + s.response = r +} + +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { + //fmt.Println(try) // Comment out for debugging. + if s.countWasBounded && s.info.Range.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + s.responseMu.Lock() + resp := s.response + s.responseMu.Unlock() + if resp == nil { // We don't have a response stream to read from, try to get one. + newResponse, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.setResponse(newResponse) + resp = newResponse + } + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + + // Injection mechanism for testing. + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } + } + + // We successfully read data or end EOF. + if err == nil || err == io.EOF { + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the return to the caller + } + _ = s.Close() + + s.setResponse(nil) // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. 
+ retriesExhausted := try >= s.retryReaderOptions.MaxRetries + _, isNetError := err.(net.Error) + isUnexpectedEOF := err == io.ErrUnexpectedEOF + willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted + + // Notify, for logging purposes, of any failures + if s.retryReaderOptions.OnFailedRead != nil { + failureCount := try + 1 // because try is zero-based + s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) + } + + if willRetry { + continue + // Loop around and try to get and read from new stream. + } + return n, err // Not retryable, or retries exhausted, so just return + } +} + +// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry +// Is this safe, to close early from another goroutine? Early close ultimately ends up calling +// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" +// which is exactly the behaviour we want. +// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) +// then there are two different types of error that may happen - either the one one we check for here, +// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine +// to check for one, since the other is a net.Error, which our main Read retry loop is already handing. +func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +// ReadOnClosedBodyMessage of retry reader +const ReadOnClosedBodyMessage = "read on closed response body" + +// Close retry reader +func (s *RetryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil { + return s.response.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go new file mode 100644 index 000000000000..7b4c8e248991 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleId string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy struct { + PolicyId *string + Rules *[]ObjectReplicationRules +} + +// deserializeORSPolicies is utility function to deserialize ORS Policies +func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { + if policies == nil { + return nil + } + // For source blobs (blobs that have policy ids and rule ids applied to them), + // the header will be formatted as "x-ms-or-_: {Complete, Failed}". + // The value of this header is the status of the replication. 
+ orPolicyStatusHeader := make(map[string]string) + for key, value := range policies { + if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { + orPolicyStatusHeader[key] = value + } + } + + parsedResult := make(map[string][]ObjectReplicationRules) + for key, value := range orPolicyStatusHeader { + policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") + policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] + + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + } + + for policyId, rules := range parsedResult { + objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ + PolicyId: &policyId, + Rules: &rules, + }) + } + return +} + +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders +func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { + return HTTPHeaders{ + BlobContentType: resp.ContentType, + BlobContentEncoding: resp.ContentEncoding, + BlobContentLanguage: resp.ContentLanguage, + BlobContentDisposition: resp.ContentDisposition, + BlobCacheControl: resp.CacheControl, + BlobContentMD5: resp.ContentMD5, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go new file mode 100644 index 000000000000..6a1db8045c52 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package bloberror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
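A small sketch of how HasCode is meant to be used at call sites follows; the blob client and the GetProperties call are placeholders for whatever operation produced the error.

package blobexample

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// existsOrNotFound treats BlobNotFound / ContainerNotFound as a clean "false"
// instead of an error, and propagates everything else.
func existsOrNotFound(ctx context.Context, client *blob.Client) (bool, error) {
	_, err := client.GetProperties(ctx, nil)
	if err == nil {
		return true, nil
	}
	if bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound) {
		return false, nil
	}
	return false, fmt.Errorf("getting blob properties: %w", err)
}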
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + 
InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go new file mode 100644 index 000000000000..16927ecf895e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -0,0 +1,236 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) { + if err := o.format(); err != nil { + return CommitBlockListResponse{}, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var err error + generatedUuid, err := uuid.New() + if err != nil { + return CommitBlockListResponse{}, err + } + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(generatedUuid), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. + if err != nil && !errors.Is(err, io.EOF) { + return CommitBlockListResponse{}, err + } + + // Close out our upload. + if err := cp.close(); err != nil { + return CommitBlockListResponse{}, err + } + + return cp.result, nil +} + +// copier streams a file via chunks in parallel from a reader representing a file. +// Do not use directly, instead use copyFromReader(). +type copier struct { + // ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case, + // the copier has the lifetime of a function call, so it's fine. + ctx context.Context + cancel context.CancelFunc + + // reader is the source to be written to storage. 
+ reader io.Reader + // to is the location we are writing our chunks to. + to blockWriter + + // o contains our options for uploading. + o UploadStreamOptions + + // id provides the ids for each chunk. + id *id + + //// num is the current chunk we are on. + //num int32 + //// ch is used to pass the next chunk of data from our reader to one of the writers. + //ch chan copierChunk + + // errCh is used to hold the first error from our concurrent writers. + errCh chan error + // wg provides a count of how many writers we are waiting to finish. + wg sync.WaitGroup + + // result holds the final result from blob storage after we have submitted all chunks. + result CommitBlockListResponse +} + +// copierChunk contains buffer +type copierChunk struct { + buffer []byte + id string + length int +} + +// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error +// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier. +func (c *copier) getErr() error { + select { + case err := <-c.errCh: + return err + default: + } + return c.ctx.Err() +} + +// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel. +// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF. +func (c *copier) sendChunk() error { + if err := c.getErr(); err != nil { + return err + } + + buffer := c.o.transferManager.Get() + if len(buffer) == 0 { + return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager") + } + + n, err := io.ReadFull(c.reader, buffer) + if n > 0 { + // Some data was read, schedule the Write. + id := c.id.next() + c.wg.Add(1) + c.o.transferManager.Run( + func() { + defer c.wg.Done() + c.write(copierChunk{buffer: buffer, id: id, length: n}) + }, + ) + } else { + // Return the unused buffer to the manager. + c.o.transferManager.Put(buffer) + } + + if err == nil { + return nil + } else if err == io.EOF || err == io.ErrUnexpectedEOF { + return io.EOF + } + + if cerr := c.getErr(); cerr != nil { + return cerr + } + + return err +} + +// write uploads a chunk to blob storage. +func (c *copier) write(chunk copierChunk) { + defer c.o.transferManager.Put(chunk.buffer) + + if err := c.ctx.Err(); err != nil { + return + } + stageBlockOptions := c.o.getStageBlockOptions() + _, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) + if err != nil { + select { + case c.errCh <- err: + // failed to stage block, cancel the copy + default: + // don't block the goroutine if there's a pending error + } + } +} + +// close commits our blocks to blob storage and closes our writer. +func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + commitBlockListOptions := c.o.getCommitBlockListOptions() + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID(uu uuid.UUID) *id { + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. 
+func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go new file mode 100644 index 000000000000..4af33356e291 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "io" + "os" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client defines a set of operations applicable to block blobs. +type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? 
+// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (bb *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) +} + +func (bb *Client) generated() *generated.BlockBlobClient { + _, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return blockBlob +} + +// URL returns the URL endpoint used by the Client object. +func (bb *Client) URL() string { + return bb.generated().Endpoint() +} + +// BlobClient returns the embedded blob client for this AppendBlob client. +func (bb *Client) BlobClient() *blob.Client { + blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) + return (*blob.Client)(blobClient) +} + +// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. 
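The constructors above mirror the other sub-clients in this module. A sketch of building a block blob client from a connection string and from a pre-signed SAS URL; the account, key, container, blob, and SAS values are placeholders.

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// From a connection string (placeholder credentials).
	connStr := "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net"
	client, err := blockblob.NewClientFromConnectionString(connStr, "mycontainer", "data.bin", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("block blob endpoint:", client.URL())

	// From a SAS URL (anonymous pipeline, no credential attached).
	sasClient, err := blockblob.NewClientWithNoCredential("https://<account>.blob.core.windows.net/mycontainer/data.bin?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = sasClient
}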
+func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (bb *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(bb.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. To +// perform a partial update of a block blob, use StageBlock and CommitBlockList. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return UploadResponse{}, err + } + + opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) + return resp, err +} + +// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. +func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return StageBlockResponse{}, err + } + + opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() + + resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo) + return resp, err +} + +// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// If count is CountToEnd (0), then data is read from specified offset to the end. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. 
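Upload is the single-shot path for payloads small enough to send in one request; the staged-block flow is shown separately below. A sketch, assuming azcore's streaming.NopCloser helper (not part of this file) to satisfy the io.ReadSeekCloser parameter.

package blobexample

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// putSmallBlob writes the payload in a single Upload call, overwriting any existing blob.
func putSmallBlob(ctx context.Context, client *blockblob.Client, payload []byte) error {
	body := streaming.NopCloser(bytes.NewReader(payload)) // io.ReadSeekCloser positioned at 0, as Upload requires
	_, err := client.Upload(ctx, body, nil)
	return err
}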
+func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, + contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { + + stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() + + resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, + cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written +// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob +// by uploading only those blocks that have changed, then committing the new and existing +// blocks together. Any blocks not specified in the block list and permanently deleted. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. +func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := generated.BlockLookupList{Latest: blockIds} + + var commitOptions *generated.BlockBlobClientCommitBlockListOptions + var headers *generated.BlobHTTPHeaders + var leaseAccess *blob.LeaseAccessConditions + var cpkInfo *generated.CpkInfo + var cpkScope *generated.CpkScopeInfo + var modifiedAccess *generated.ModifiedAccessConditions + + if options != nil { + commitOptions = &generated.BlockBlobClientCommitBlockListOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags), + Metadata: options.Metadata, + RequestID: options.RequestID, + Tier: options.Tier, + Timeout: options.Timeout, + TransactionalContentCRC64: options.TransactionalContentCRC64, + TransactionalContentMD5: options.TransactionalContentMD5, + } + + headers = options.HTTPHeaders + leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) + cpkInfo = options.CpkInfo + cpkScope = options.CpkScopeInfo + } + + resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + return resp, err +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac) + + return resp, err +} + +// Redeclared APIs ----- Copy over to Append blob and Page blob as well. + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. 
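For larger or incrementally produced content, blocks are staged individually and then committed in order, as CommitBlockList above describes. A sketch of that two-phase flow; the fixed-width counter-based block IDs and the streaming.NopCloser helper are illustrative assumptions.

package blobexample

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// putInBlocks stages each chunk as its own block and commits the full list at the end.
func putInBlocks(ctx context.Context, client *blockblob.Client, chunks [][]byte) error {
	blockIDs := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// Base64-encoded, fixed-width IDs keep every ID the same length, which the service expects.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", i)))
		if _, err := client.StageBlock(ctx, id, streaming.NopCloser(bytes.NewReader(chunk)), nil); err != nil {
			return err
		}
		blockIDs = append(blockIDs, id)
	}
	_, err := client.CommitBlockList(ctx, blockIDs, nil)
	return err
}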
+func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return bb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return bb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return bb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return bb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return bb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return bb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. 
+// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return bb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return bb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return bb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in blocks to a block blob. +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error + if readerSize > MaxStageBlockBytes*MaxBlocks { + return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= MaxUploadBlobBytes { + o.BlockSize = MaxUploadBlobBytes // Default if unspecified + } else { + if remainder := readerSize % MaxBlocks; remainder > 0 { + // ensure readerSize is a multiple of MaxBlocks + readerSize += (MaxBlocks - remainder) + } + o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = blob.DefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). 
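// Worked example, not part of the upstream source: for a 1 GiB reader,
// readerSize is padded up to the next multiple of MaxBlocks (50,000) and divided by it,
// giving a block size of roughly 21 KiB; that is below the 4 MiB DefaultDownloadBlockSize
// floor, so BlockSize becomes 4 MiB and the upload runs as 256 staged blocks of 4 MiB each.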
+ } + } + + if readerSize <= MaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) + + return toUploadReaderAtResponseFromUploadResponse(resp), err + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + if numBlocks > MaxBlocks { + // prevent any math bugs from attempting to upload too many blocks which will always fail + return uploadFromReaderResponse{}, errors.New("block limit exceeded") + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Concurrency: o.Concurrency, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(shared.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + generatedUuid, err := uuid.New() + if err != nil { + return err + } + blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) + stageBlockOptions := o.getStageBlockOptions() + _, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions) + return err + }, + }) + if err != nil { + return uploadFromReaderResponse{}, err + } + // All put blocks were successful, call Put Block List to finalize the blob + commitBlockListOptions := o.getCommitBlockListOptions() + resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) + + return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) +} + +// UploadFile uploads a file in blocks to a block blob. +func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + stat, err := file.Stat() + if err != nil { + return uploadFromReaderResponse{}, err + } + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. 
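// Illustrative sketch, not part of the vendored file: calling UploadBuffer above with an
// explicit block size, upload concurrency, and progress callback. The *blockblob.Client is
// assumed to exist already, and the 8 MiB block size and concurrency of 4 are arbitrary
// example values.
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func uploadWithProgress(ctx context.Context, bb *blockblob.Client, data []byte) error {
	_, err := bb.UploadBuffer(ctx, data, &blockblob.UploadBufferOptions{
		BlockSize:   8 * 1024 * 1024, // stage 8 MiB blocks instead of the computed default
		Concurrency: 4,               // keep up to four blocks in flight at once
		Progress: func(bytesTransferred int64) {
			fmt.Printf("uploaded %d of %d bytes\n", bytesTransferred, len(data))
		},
	})
	return err
}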
+// A Context deadline or cancellation will cause this to error. +func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + if err := o.format(); err != nil { + return CommitBlockListResponse{}, err + } + + if o == nil { + o = &UploadStreamOptions{} + } + + // If we used the default manager, we need to close it. + if o.transferMangerNotSet { + defer o.transferManager.Close() + } + + result, err := copyFromReader(ctx, body, bb, *o) + if err != nil { + return CommitBlockListResponse{}, err + } + + return result, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return bb.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return bb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go new file mode 100644 index 000000000000..46de1ede7e16 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// nolint +const ( + // CountToEnd specifies the end of the file + CountToEnd = 0 + + _1MiB = 1024 * 1024 + + // MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB + + // MaxBlocks indicates the maximum number of blocks allowed in a block blob. + MaxBlocks = 50000 +) + +// BlockListType defines values for BlockListType +type BlockListType = generated.BlockListType + +const ( + BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted + BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted + BlockListTypeAll BlockListType = generated.BlockListTypeAll +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. 
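// Illustrative note, not part of the upstream source: taken together, the limits above mean
// a single Upload call is capped at 256 MiB, while a staged upload can reach
// MaxStageBlockBytes * MaxBlocks = 4,000 MiB * 50,000 blocks, roughly 190.7 TiB per block
// blob; uploadFromReader in client.go rejects anything larger than that product up front.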
+func PossibleBlockListTypeValues() []BlockListType { + return generated.PossibleBlockListTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go new file mode 100644 index 000000000000..1e93143642d8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -0,0 +1,311 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block = generated.Block + +// BlockList - type of blocklist (committed/uncommitted) +type BlockList = generated.BlockList + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// UploadOptions contains the optional parameters for the Client.Upload method. +type UploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + HTTPHeaders *blob.HTTPHeaders + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions +} + +func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := generated.BlockBlobClientUploadOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +type StageBlockOptions struct { + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + LeaseAccessConditions *blob.LeaseAccessConditions + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. 
+func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &generated.BlockBlobClientStageBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method. +type StageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + LeaseAccessConditions *blob.LeaseAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo +} + +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + SourceRange: exported.FormatHTTPRange(o.Range), + } + + return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. +type CommitBlockListOptions struct { + Tags map[string]string + Metadata map[string]string + RequestID *string + Tier *blob.AccessTier + Timeout *int32 + TransactionalContentCRC64 []byte + TransactionalContentMD5 []byte + HTTPHeaders *blob.HTTPHeaders + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method. +type GetBlockListOptions struct { + Snapshot *string + AccessConditions *blob.AccessConditions +} + +func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// ------------------------------------------------------------ + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. 
+type uploadFromReaderOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *blob.HTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata map[string]string + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions *blob.AccessConditions + + // AccessTier indicates the tier of blob + AccessTier *blob.AccessTier + + // BlobTags + Tags map[string]string + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + + // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) + Concurrency uint16 + + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 *[]byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 *[]byte +} + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) + return &StageBlockOptions{ + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { + return &UploadOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + AccessConditions: o.AccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions { + return &CommitBlockListOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions struct { + // transferManager provides a transferManager that controls buffer allocation/reuse and + // concurrency. This overrides BlockSize and MaxConcurrency if set. + transferManager shared.TransferManager + transferMangerNotSet bool + + // BlockSize defines the size of the buffer used during upload. The default and mimimum value is 1 MiB. + BlockSize int + + // Concurrency defines the number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
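	// Illustrative note, not part of the upstream source: with the zero value of this struct,
	// format() below falls back to a single 1 MiB static buffer, so UploadStream stages one
	// 1 MiB block at a time; raising BlockSize and Concurrency trades memory for throughput,
	// since up to Concurrency buffers of BlockSize bytes may be allocated at once.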
+ Concurrency int + + HTTPHeaders *blob.HTTPHeaders + Metadata map[string]string + AccessConditions *blob.AccessConditions + AccessTier *blob.AccessTier + Tags map[string]string + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo +} + +func (u *UploadStreamOptions) format() error { + if u == nil || u.transferManager != nil { + return nil + } + + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB + } + + var err error + u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency) + if err != nil { + return fmt.Errorf("bug: default transfer manager could not be created: %s", err) + } + u.transferMangerNotSet = true + return nil +} + +func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) + return &StageBlockOptions{ + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { + options := &CommitBlockListOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + AccessConditions: u.AccessConditions, + } + + return options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go new file mode 100644 index 000000000000..dab1873b1a9d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -0,0 +1,111 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// UploadResponse contains the response from method Client.Upload. +type UploadResponse = generated.BlockBlobClientUploadResponse + +// StageBlockResponse contains the response from method Client.StageBlock. +type StageBlockResponse = generated.BlockBlobClientStageBlockResponse + +// CommitBlockListResponse contains the response from method Client.CommitBlockList. +type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse + +// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. +type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse + +// GetBlockListResponse contains the response from method Client.GetBlockList. +type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse + +// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type uploadFromReaderResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + // Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) + ContentCRC64 []byte +} + +func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + } +} + +func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + ContentCRC64: resp.XMSContentCRC64, + } +} + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadFileResponse = uploadFromReaderResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = uploadFromReaderResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. 
+type UploadStreamResponse = CommitBlockListResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml new file mode 100644 index 000000000000..e0623f50e855 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -0,0 +1,28 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azblob' + RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go new file mode 100644 index 000000000000..4cc9a51bb402 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -0,0 +1,171 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "context" + "io" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type Client struct { + svc *service.Client +} + +// NewClient creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + var clientOptions *service.ClientOptions + if options != nil { + clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} + } + svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + var clientOptions *service.ClientOptions + if options != nil { + clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} + } + svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.blob.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options)) + if err != nil { + return nil, err + } + + return &Client{ + svc: svcClient, + }, nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + if err != nil { + return nil, err + } + return &Client{ + svc: containerClient, + }, nil +} + +// URL returns the URL endpoint used by the BlobClient object. +func (c *Client) URL() string { + return c.svc.URL() +} + +// CreateContainer is a lifecycle method to creates a new container under the specified account. +// If the container with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created container. +func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) { + return c.svc.CreateContainer(ctx, containerName, o) +} + +// DeleteContainer is a lifecycle method that marks the specified container for deletion. +// The container and any blobs contained within it are later deleted during garbage collection. +// If the container is not found, a ResourceNotFoundError will be raised. +func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) { + return c.svc.DeleteContainer(ctx, containerName, o) +} + +// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o) +} + +// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { + return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o) +} + +// NewListContainersPager operation returns a pager of the containers under the specified account. +// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. 
+func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { + return c.svc.NewListContainersPager(o) +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o) +} + +// UploadFile uploads a file in blocks to a block blob. +func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) { + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o) +} + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) { + o = shared.CopyOptions(o) + return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go new file mode 100644 index 000000000000..560e151d553f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. 
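// Illustrative sketch, not part of the vendored file: end-to-end use of the convenience
// Client defined above. azidentity is assumed to be available for the credential, and the
// account URL, container name, and blob name are placeholders.
package main

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := azblob.NewClient("https://<account>.blob.core.windows.net/", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Create the container, then stream a small payload into a block blob.
	if _, err = client.CreateContainer(ctx, "examples", nil); err != nil {
		log.Fatal(err)
	}
	// A non-nil empty options struct picks the 1 MiB, single-buffer streaming defaults.
	if _, err = client.UploadStream(ctx, "examples", "hello.txt", strings.NewReader("hello, world"), &azblob.UploadStreamOptions{}); err != nil {
		log.Fatal(err)
	}
}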
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go new file mode 100644 index 000000000000..d4c3262d932d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go new file mode 100644 index 000000000000..e4afad9f84f1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -0,0 +1,334 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package container + +import ( + "context" + "errors" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type Client base.Client[generated.ContainerClient] + +// NewClient creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a container or with a shared access signature (SAS) token. +// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. 
https://.blob.core.windows.net/container +// - cred - a SharedKeyCredential created with the matching container's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (c *Client) generated() *generated.ContainerClient { + return base.InnerClient((*base.Client[generated.ContainerClient])(c)) +} + +func (c *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ContainerClient])(c)) +} + +// URL returns the URL endpoint used by the Client object. +func (c *Client) URL() string { + return c.generated().Endpoint() +} + +// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of +// Client's URL. The new BlobClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's +// NewBlobClient method. +func (c *Client) NewBlobClient(blobName string) *blob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of +// Client's URL. The new AppendBlobURL uses the same request policy pipeline as the Client. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's +// NewAppendBlobClient method. +func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of +// Client's URL. 
The new BlockBlobClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's +// NewBlockBlobClient method. +func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of Client's URL. The new PageBlobURL uses the same request policy pipeline as the Client. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's +// NewPageBlobClient method. +func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. +func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + var opts *generated.ContainerClientCreateOptions + var cpkScopes *generated.ContainerCpkScopeInfo + if options != nil { + opts = &generated.ContainerClientCreateOptions{ + Access: options.Access, + Metadata: options.Metadata, + } + cpkScopes = options.CpkScopeInfo + } + resp, err := c.generated().Create(ctx, opts, cpkScopes) + + return resp, err +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. +func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := options.format() + resp, err := c.generated().Delete(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// Restore operation restore the contents and properties of a soft deleted container to a specified container. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container. +func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) { + urlParts, err := blob.ParseURL(c.URL()) + if err != nil { + return RestoreResponse{}, err + } + + opts := &generated.ContainerClientRestoreOptions{ + DeletedContainerName: &urlParts.ContainerName, + DeletedContainerVersion: &deletedContainerVersion, + } + resp, err := c.generated().Restore(ctx, opts) + + return resp, err +} + +// GetProperties returns the container's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. +func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { + // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. 
+ // This allows us to not expose a GetProperties method at all simplifying the API. + // The optionals are nil, like they were in track 1.5 + opts, leaseAccessConditions := o.format() + + resp, err := c.generated().GetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetMetadata sets the container's metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. +func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) { + metadataOptions, lac, mac := o.format() + resp, err := c.generated().SetMetadata(ctx, metadataOptions, lac, mac) + + return resp, err +} + +// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. +func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + options, ac := o.format() + resp, err := c.generated().GetAccessPolicy(ctx, options, ac) + return resp, err +} + +// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. +func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + accessPolicy, mac, lac := o.format() + resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac) + return resp, err +} + +// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { + listOptions := generated.ContainerClientListBlobFlatSegmentOptions{} + if o != nil { + listOptions.Include = o.Include.format() + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{ + More: func(page ListBlobsFlatResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListBlobsFlatResponse{}, err + } + resp, err := c.generated().Pipeline().Do(req) + if err != nil { + return ListBlobsFlatResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + // TOOD: storage error? + return ListBlobsFlatResponse{}, runtime.NewResponseError(resp) + } + return c.generated().ListBlobFlatSegmentHandleResponse(resp) + }, + }) +} + +// NewListBlobsHierarchyPager returns a channel of blobs starting from the specified Marker. Use an empty +// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. 
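// Illustrative sketch, not part of the vendored file: draining the flat pager implemented
// above. The *container.Client is assumed to exist, and the Segment.BlobItems / Name field
// names are assumptions taken from the generated list-blobs models.
package example

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// listBlobNames walks every page returned by NewListBlobsFlatPager and prints blob names.
func listBlobNames(ctx context.Context, c *container.Client) {
	pager := c.NewListBlobsFlatPager(nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range page.Segment.BlobItems {
			fmt.Println(*item.Name)
		}
	}
}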
+// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the +// previously-returned Marker) to get the next segment. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. +// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored. +// AutoPagerBufferSize specifies the channel's buffer size. +// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.) +func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { + listOptions := o.format() + return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ + More: func(page ListBlobsHierarchyResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + resp, err := c.generated().Pipeline().Do(req) + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp) + } + return c.generated().ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) { + if c.sharedKey() == nil { + return "", errors.New("SAS can only be signed with a SharedKeyCredential") + } + + urlParts, err := blob.ParseURL(c.URL()) + if err != nil { + return "", err + } + + // Containers do not have snapshots, nor versions. + qps, err := sas.BlobSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + ContainerName: urlParts.ContainerName, + Permissions: permissions.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(c.sharedKey()) + if err != nil { + return "", err + } + + endpoint := c.URL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go new file mode 100644 index 000000000000..ce6e67b5ed4a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
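// Illustrative sketch, not part of the vendored file: producing a read+list container SAS
// with the GetSASURL method shown above. This only works when the client was constructed
// with a SharedKeyCredential; the account name, key, and container URL are placeholders,
// and the ContainerPermissions field names are assumptions taken from the sas package.
package example

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func printContainerSAS() {
	cred, err := container.NewSharedKeyCredential("<account>", "<key>")
	if err != nil {
		log.Fatal(err)
	}
	client, err := container.NewClientWithSharedKeyCredential("https://<account>.blob.core.windows.net/<container>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// One hour, read and list only, scoped to the whole container.
	sasURL, err := client.GetSASURL(sas.ContainerPermissions{Read: true, List: true}, time.Now().UTC(), time.Now().UTC().Add(time.Hour))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}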
+ +package container + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// LeaseStatusType defines values for LeaseStatusType +type LeaseStatusType = generated.LeaseStatusType + +const ( + LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked + LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return generated.PossibleLeaseStatusTypeValues() +} + +// LeaseDurationType defines values for LeaseDurationType +type LeaseDurationType = generated.LeaseDurationType + +const ( + LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite + LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. 
+func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// LeaseStateType defines values for LeaseStateType +type LeaseStateType = generated.LeaseStateType + +const ( + LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable + LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased + LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired + LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking + LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return generated.PossibleLeaseStateTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. 
+func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go new file mode 100644 index 000000000000..eb65d5fc3c80 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -0,0 +1,263 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CpkScopeInfo = generated.ContainerCpkScopeInfo + +// BlobProperties - Properties of a blob +type BlobProperties = generated.BlobPropertiesInternal + +// BlobItem - An Azure Storage blob +type BlobItem = generated.BlobItemInternal + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// AccessPolicy - An Access policy +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission = exported.AccessPolicyPermission + +// SignedIdentifier - signed identifier +type SignedIdentifier = generated.SignedIdentifier + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Specifies the encryption scope settings to set on the container. + CpkScopeInfo *CpkScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. 
+type DeleteOptions struct { + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// RestoreOptions contains the optional parameters for the Client.Restore method. +type RestoreOptions struct { + // placeholder for future options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type GetPropertiesOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude struct { + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool +} + +func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { + if reflect.ValueOf(l).IsZero() { + return nil + } + + include := []generated.ListBlobsIncludeItem{} + + if l.Copy { + include = append(include, generated.ListBlobsIncludeItemCopy) + } + if l.Deleted { + include = append(include, generated.ListBlobsIncludeItemDeleted) + } + if l.DeletedWithVersions { + include = append(include, generated.ListBlobsIncludeItemDeletedwithversions) + } + if l.ImmutabilityPolicy { + include = append(include, generated.ListBlobsIncludeItemImmutabilitypolicy) + } + if l.LegalHold { + include = append(include, generated.ListBlobsIncludeItemLegalhold) + } + if l.Metadata { + include = append(include, generated.ListBlobsIncludeItemMetadata) + } + if l.Snapshots { + include = append(include, generated.ListBlobsIncludeItemSnapshots) + } + if l.Tags { + include = append(include, generated.ListBlobsIncludeItemTags) + } + if l.UncommittedBlobs { + include = append(include, generated.ListBlobsIncludeItemUncommittedblobs) + } + if l.Versions { + include = append(include, generated.ListBlobsIncludeItemVersions) + } + + return include +} + +// ListBlobsFlatOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment method. +type ListBlobsFlatOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsHierarchyOptions provides set of configurations for Client.NewListBlobsHierarchyPager +type ListBlobsHierarchyOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment method. +func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions { + if o == nil { + return generated.ContainerClientListBlobHierarchySegmentOptions{} + } + + return generated.ContainerClientListBlobHierarchySegmentOptions{ + Include: o.Include.format(), + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. 
+type SetMetadataOptions struct { + Metadata map[string]string + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. +type GetAccessPolicyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation +type SetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + AccessConditions *AccessConditions +} + +func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) + return &generated.ContainerClientSetAccessPolicyOptions{ + Access: o.Access, + }, lac, mac +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go new file mode 100644 index 000000000000..9d8672b135be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.ContainerClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.ContainerClientDeleteResponse + +// RestoreResponse contains the response from method Client.Restore. +type RestoreResponse = generated.ContainerClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse + +// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse + +// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. 
+type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.ContainerClientSetMetadataResponse + +// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy. +type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse + +// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. +type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go new file mode 100644 index 000000000000..d5b6ed6a7046 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -0,0 +1,213 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +/* + +Package azblob can access an Azure Blob Storage. + +The azblob package is capable of :- + - Creating, deleting, and querying containers in an account + - Creating, deleting, and querying blobs in a container + - Creating Shared Access Signature for authentication + +Types of Resources + +The azblob package allows you to interact with three types of resources :- + +* Azure storage accounts. +* Containers within those storage accounts. +* Blobs (block blobs/ page blobs/ append blobs) within those containers. + +The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account. + +Types of Credentials + +The clients support different forms of authentication. +The azblob library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String, +or authorization with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. +This can be found in your storage account in the Azure Portal under the "Access Keys" section. + +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. 
+ + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() functions. + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.URL()) + + // Alternatively, you can create SAS on the fly + + resources := azblob.AccountSASResourceTypes{Service: true} + permission := azblob.AccountSASPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(0, 0, 1) + serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + handle(err) + + serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.URL()) + +Types of Clients + +There are three different clients provided to interact with the various components of the Blob Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete containers within the account. + +2. **`ContainerClient`** + * Get and set container access settings, properties, and metadata. + * Create, delete, and query blobs within the container. + * `ContainerLeaseClient` to support container lease management. + +3. **`BlobClient`** + * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` + * Get and set blob properties. + * Perform CRUD operations on a given blob. + * `BlobLeaseClient` to support blob lease management. + +Examples + + // Your account name and key can be obtained from the Azure Portal. + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ + serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a container ===== + + // First, create a container client, and use the Create method to create a new container in your account + containerClient, err := serviceClient.NewContainerClient("testcontainer") + handle(err) + + // All APIs have an options' bag struct as a parameter. + // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc. + // If you want to use the default options, pass in nil. + _, err = containerClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Upload and Download a block blob ===== + uploadData := "Hello world!" 
+ + // Create a new blockBlobClient from the containerClient + blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt") + handle(err) + + // Upload data to the block blob + blockBlobUploadOptions := azblob.BlockBlobUploadOptions{ + Metadata: map[string]string{"Foo": "Bar"}, + TagsMap: map[string]string{"Year": "2022"}, + } + _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) + handle(err) + + // Download the blob's contents and ensure that the download worked properly + blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil) + handle(err) + + // Use the bytes.Buffer object to read the downloaded data. + // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. + reader := blobDownloadResponse.Body(nil) + downloadData, err := io.ReadAll(reader) + handle(err) + if string(downloadData) != uploadData { + handle(errors.New("Uploaded data should be same as downloaded data")) + } + + + if err = reader.Close(); err != nil { + handle(err) + return + } + + // ===== 3. List blobs ===== + // List methods returns a pager object which can be used to iterate over the results of a paging operation. + // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. + // PageResponse() can be used to iterate over the results of the specific page. + // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. + pager := containerClient.ListBlobsFlat(nil) + for pager.NextPage(context.TODO()) { + resp := pager.PageResponse() + for _, v := range resp.Segment.BlobItems { + fmt.Println(*v.Name) + } + } + + if err = pager.Err(); err != nil { + handle(err) + } + + // Delete the blob. + _, err = blockBlobClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the container. + _, err = containerClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go new file mode 100644 index 000000000000..16e6cac066db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -0,0 +1,89 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
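One thing worth flagging in the doc.go example above: the ListBlobsFlat loop still uses the older NextPage/PageResponse/Err pager surface, while the pagers defined in this version of the module (for instance NewListBlobsHierarchyPager in container/client.go earlier in this diff) are azcore runtime.Pager values. A minimal sketch of the newer iteration style, assuming a flat-listing pager with the same response shape (the constructor name is an assumption):

    pager := containerClient.NewListBlobsFlatPager(nil)
    for pager.More() {
        page, err := pager.NextPage(context.TODO())
        handle(err)
        for _, blobItem := range page.Segment.BlobItems {
            fmt.Println(*blobItem.Name)
        }
    }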
+ +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { + return &Client[generated.ContainerClient]{ + inner: generated.NewContainerClient(containerURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { + return &Client[generated.BlobClient]{ + inner: generated.NewBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +type CompositeClient[T, U any] struct { + innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { + return &Client[T]{inner: client.innerT}, client.innerU +} + +func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { + return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewAppendBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { + return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewBlockBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { + return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ + innerT: generated.NewBlobClient(blobURL, pipeline), + innerU: generated.NewPageBlobClient(blobURL, pipeline), + sharedKey: sharedKey, + } +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go new file mode 100644 index 000000000000..96d188fa5678 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package exported + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go new file mode 100644 index 000000000000..14c293cf6569 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. +func (p *AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. 
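A short round-trip sketch for the permission flags above (illustrative; the same type is re-exported as container.AccessPolicyPermission, and Parse, defined just below, reverses String):

    perm := container.AccessPolicyPermission{Read: true, Write: true, List: true}
    s := perm.String() // "rwl" - flags are always emitted in the fixed r,a,c,w,d,l order

    var parsed container.AccessPolicyPermission
    handle(parsed.Parse(s)) // any rune outside r,a,c,w,d,l makes Parse return an error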
+func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go new file mode 100644 index 000000000000..9bc1ca47df84 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. +func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go new file mode 100644 index 000000000000..d1563105423f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -0,0 +1,218 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. 
+type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// AccountName returns the Storage account's name. +func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. +func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go new file mode 100644 index 000000000000..2e2dd16e426c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
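A compact sketch of the shared-key signing mechanism in shared_key_credential.go above (illustrative; the exported package is internal to the module and is normally reached through the SharedKeyCredential alias in the public packages, and the key and string-to-sign below are dummies):

    dummyKey := base64.StdEncoding.EncodeToString([]byte("not-a-real-account-key"))
    cred, err := exported.NewSharedKeyCredential("myaccount", dummyKey)
    handle(err)

    // SharedKeyCredPolicy builds the real string-to-sign from the outgoing request;
    // signing a placeholder here just shows the HMAC step and the header shape.
    sig, err := exported.ComputeHMACSHA256(cred, "GET\n...\n/myaccount/mycontainer")
    handle(err)
    fmt.Println("Authorization: SharedKey " + cred.AccountName() + ":" + sig)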
+ +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential struct { + accountName string + userDelegationKey UserDelegationKey +} + +// getAccountName returns the Storage account's Name +func (f *UserDelegationCredential) getAccountName() string { + return f.accountName +} + +// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. +func GetAccountName(udc *UserDelegationCredential) string { + return udc.getAccountName() +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { + bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) + h := hmac.New(sha256.New, bytes) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. +func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { + return udc.computeHMACSHA256(message) +} + +// getUDKParams returns UserDelegationKey +func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { + return &f.userDelegationKey +} + +// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. +func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { + return udc.getUDKParams() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go new file mode 100644 index 000000000000..7b8ee601dc83 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "azblob" + ModuleVersion = "v0.5.1" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go new file mode 100644 index 000000000000..3b6184fea67c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
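And the user-delegation variant from user_delegation_credential.go, sketched the same way (the key would normally come from the service's get-user-delegation-key operation, which is not part of this file; only the Value field is populated here and the remaining UserDelegationKey fields are omitted):

    keyValue := base64.StdEncoding.EncodeToString([]byte("stand-in-delegation-key"))
    udk := exported.UserDelegationKey{Value: &keyValue}

    udc := exported.NewUserDelegationCredential("myaccount", udk)
    sig, err := exported.ComputeUDCHMACSHA256(udc, "string-to-sign")
    handle(err)
    fmt.Println(exported.GetAccountName(udc), sig)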
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *AppendBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *AppendBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md new file mode 100644 index 000000000000..d4e2100e1972 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -0,0 +1,304 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: . +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.43" +``` + +### Remove pager methods and export various generated methods in container client + +``` yaml +directive: + - from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). 
+ replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['BlobDeleteType'] + transform: > + $["x-ms-enum"].name = "DeleteType"; + $["x-ms-client-name"] = "DeleteType"; + +- from: swagger-document + where: $.parameters['BlobExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +- from: swagger-document + where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] + transform: > + $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; + $.enum = [ "Mutable", "Unlocked", "Locked"]; + $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; + +- from: swagger-document + where: $.parameters['ImmutabilityPolicyMode'] + transform: > + $["x-ms-enum"].name = "ImmutabilityPolicySetting"; + $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; + +- from: swagger-document + where: $.definitions['BlobPropertiesInternal'] + transform: > + $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+ replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEralierVersionSnapshotNotAllowed"\n, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEarlierVersionSnapshotNotAllowed"\ + replace(/StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g) +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go new file mode 100644 index 000000000000..c3d3c2607c8c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *BlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go new file mode 100644 index 000000000000..a43e327ec44d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *BlockBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlockBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go new file mode 100644 index 000000000000..bbbf828a07af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
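The net effect of the azcore.ETag directives in autorest.md above is a mechanical retyping of the generated models and response handlers; roughly (illustrative shape, not actual generated output):

    // Fields that AutoRest would emit as *string come out as *azcore.ETag instead:
    type modifiedAccessConditionsSketch struct {
        IfMatch           *azcore.ETag // was: IfMatch *string
        IfNoneMatch       *azcore.ETag // was: IfNoneMatch *string
        IfModifiedSince   *time.Time
        IfUnmodifiedSince *time.Time
    }

    // and response handlers are rewritten from
    //     result.ETag = &val
    // to
    //     result.ETag = (*azcore.ETag)(&val)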
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go new file mode 100644 index 000000000000..8a212cc3d4e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *PageBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *PageBlobClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go new file mode 100644 index 000000000000..1f449b955e82 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go new file mode 100644 index 000000000000..d0fe18c5d272 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -0,0 +1,653 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use NewAppendBlobClient() instead. +type AppendBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
+func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { + client := &AppendBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. 
+func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// contentLength - The length of the request. +// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockFromURLHandleResponse(resp) +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. +func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && 
cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
+func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != 
nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + } + return client.sealHandleResponse(resp) +} + +// sealCreateRequest creates the Seal request. 
+func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go new file mode 100644 index 000000000000..713fb52794bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -0,0 +1,2845 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use NewBlobClient() instead. +type BlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { + client := &BlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyFromURLHandleResponse(resp) +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. +func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} + } + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. 
+// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { + result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.copyFromURLHandleResponse(resp) +} + +// copyFromURLCreateRequest creates the CopyFromURL request. 
+func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = 
[]string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. +func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { + result := BlobClientCopyFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + return result, nil +} + +// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + } + return client.createSnapshotHandleResponse(resp) +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. +func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "snapshot") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + 
req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. +func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { + result := BlobClientCreateSnapshotResponse{} + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed +// from the storage account. If the storage account's soft delete feature is enabled, +// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service +// retains the blob or snapshot for the number of days specified by the +// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number +// of days has passed, the blob's data is permanently removed from the storage +// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use +// the List Blobs API and specify the "include=deleted" query parameter to discover +// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to +// return an HTTP status code of 404 (ResourceNotFound). +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) { + result := BlobClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteImmutabilityPolicyHandleResponse(resp) +} + +// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. +func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. +func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) { + result := BlobClientDeleteImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDeleteImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You +// can also call Download to read a snapshot. 
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientDownloadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. +func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + } + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + 
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// downloadHandleResponse handles the Download response. +func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { + result := BlobClientDownloadResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := 
time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val :=
resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. +func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { + result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. 
It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = 
[]string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + }
+ result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := 
strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// GetTags - The Get Tags operation enables users to get the tags associated with a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.getTagsHandleResponse(resp) +} + +// getTagsCreateRequest creates the GetTags request. 
+func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getTagsHandleResponse handles the GetTags response. +func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { + result := BlobClientGetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { + return BlobClientGetTagsResponse{}, err + } + return result, nil +} + +// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientQueryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientQueryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + } + return client.queryHandleResponse(resp) +} + +// queryCreateRequest creates the Query request. +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "query") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.QueryRequest != nil { + return req, runtime.MarshalAsXML(req, *options.QueryRequest) + } + return req, nil +} + +// queryHandleResponse handles the Query response. 
+func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { + result := BlobClientQueryResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val :=
resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. +// options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { + result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. +// options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { + result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// expiryOptions - Required. Indicates mode of the expiry time +// options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + } + return client.setExpiryHandleResponse(resp) +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} + if options != nil && options.ExpiresOn != nil { + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
+func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { + result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + } + return client.setHTTPHeadersHandleResponse(resp) +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
+func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
+func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { + result := BlobClientSetHTTPHeadersResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setImmutabilityPolicyHandleResponse(resp) +} + +// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
+func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. +func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { + result := BlobClientSetImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + return result, nil +} + +// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// legalHold - Specified if a legal hold should be set on the blob. +// options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
+func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + } + return client.setLegalHoldHandleResponse(resp) +} + +// setLegalHoldCreateRequest creates the SetLegalHold request. +func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "legalhold") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setLegalHoldHandleResponse handles the SetLegalHold response. +func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { + result := BlobClientSetLegalHoldResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value +// pairs +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + 
return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { + result := BlobClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// SetTags - The Set Tags operation enables users to set tags on a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// tags - Blob tags +// options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.setTagsHandleResponse(resp) +} + +// setTagsCreateRequest creates the SetTags request. 
+func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, tags) +} + +// setTagsHandleResponse handles the SetTags response. +func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { + result := BlobClientSetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A +// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// tier - Indicates the tier to be set on the blob. +// options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetTierResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientSetTierResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + } + return client.setTierHandleResponse(resp) +} + +// setTierCreateRequest creates the SetTier request. +func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tier") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setTierHandleResponse handles the SetTier response. +func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { + result := BlobClientSetTierResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. 
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.startCopyFromURLHandleResponse(resp) +} + +// startCopyFromURLCreateRequest creates the StartCopyFromURL request. +func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
+func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { + result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Undelete - Undelete a blob that was previously soft deleted +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + req, err := client.undeleteCreateRequest(ctx, options) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + } + return client.undeleteHandleResponse(resp) +} + +// undeleteCreateRequest creates the Undelete request. +func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// undeleteHandleResponse handles the Undelete response. 
+func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { + result := BlobClientUndeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go new file mode 100644 index 000000000000..5f2d1101613d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -0,0 +1,960 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// BlockBlobClient contains the methods for the BlockBlob group. +// Don't use this type directly, use NewBlockBlobClient() instead. +type BlockBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { + client := &BlockBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blocks - Blob Blocks. +// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.commitBlockListHandleResponse(resp) +} + +// commitBlockListCreateRequest creates the CommitBlockList request. +func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, blocks) +} + +// commitBlockListHandleResponse handles the CommitBlockList response. 
+func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { + result := BlockBlobClientCommitBlockListResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.getBlockListHandleResponse(resp) +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { + result := BlockBlobClientGetBlockListResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
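+//
+// Example (editorial sketch, illustrative only; not part of the generated file): assuming client is an
+// already-constructed *BlockBlobClient, ctx is a context.Context, and srcURL is a source blob URL that
+// is either public or carries a SAS. The request built below has no body, so 0 is shown for
+// contentLength, and nil is passed for every optional parameter group:
+//
+//     _, err := client.PutBlobFromURL(ctx, 0, srcURL, nil, nil, nil, nil, nil, nil, nil)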
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = 
[]string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = 
[]string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { + result := BlockBlobClientPutBlobFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
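+//
+// Example (editorial sketch, illustrative only; not part of the generated file): assuming client is an
+// already-constructed *BlockBlobClient, ctx is a context.Context, body is an io.ReadSeekCloser of length
+// n (for example a bytes.Reader wrapped by a no-op closer), and every block ID shares the same
+// pre-encoding width:
+//
+//     id := base64.StdEncoding.EncodeToString([]byte("block-000001"))
+//     _, err := client.StageBlock(ctx, id, n, body, nil, nil, nil, nil)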
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) { + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockHandleResponse(resp) +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// stageBlockHandleResponse handles the StageBlock response. 
+func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { + result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// sourceURL - Specify a URL to the copy source. +// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
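+//
+// Example (editorial sketch, illustrative only; not part of the generated file): assuming client, ctx,
+// a base64-encoded blockID, and a readable srcURL as above. The staged request carries no body, so 0 is
+// shown for contentLength; the byte range to copy is supplied through the optional SourceRange field:
+//
+//     srcRange := "bytes=0-1048575"
+//     opts := &BlockBlobClientStageBlockFromURLOptions{SourceRange: &srcRange}
+//     _, err := client.StageBlockFromURL(ctx, blockID, 0, srcURL, opts, nil, nil, nil, nil)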
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockFromURLHandleResponse(resp) +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + 
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { + result := BlockBlobClientStageBlockFromURLResponse{} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. 
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + } + return client.uploadHandleResponse(resp) +} + +// uploadCreateRequest creates the Upload request. +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadHandleResponse handles the Upload response. 
+func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go new file mode 100644 index 000000000000..74e6cf1e8590 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -0,0 +1,714 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +type AccessTier string + +const ( + AccessTierArchive AccessTier = "Archive" + AccessTierCool AccessTier = "Cool" + AccessTierHot AccessTier = "Hot" + AccessTierP10 AccessTier = "P10" + AccessTierP15 AccessTier = "P15" + AccessTierP20 AccessTier = "P20" + AccessTierP30 AccessTier = "P30" + AccessTierP4 AccessTier = "P4" + AccessTierP40 AccessTier = "P40" + AccessTierP50 AccessTier = "P50" + AccessTierP6 AccessTier = "P6" + AccessTierP60 AccessTier = "P60" + AccessTierP70 AccessTier = "P70" + AccessTierP80 AccessTier = "P80" + AccessTierPremium AccessTier = "Premium" +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. 
+func PossibleAccessTierValues() []AccessTier { + return []AccessTier{ + AccessTierArchive, + AccessTierCool, + AccessTierHot, + AccessTierP10, + AccessTierP15, + AccessTierP20, + AccessTierP30, + AccessTierP4, + AccessTierP40, + AccessTierP50, + AccessTierP6, + AccessTierP60, + AccessTierP70, + AccessTierP80, + AccessTierPremium, + } +} + +type AccountKind string + +const ( + AccountKindStorage AccountKind = "Storage" + AccountKindBlobStorage AccountKind = "BlobStorage" + AccountKindStorageV2 AccountKind = "StorageV2" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return []AccountKind{ + AccountKindStorage, + AccountKindBlobStorage, + AccountKindStorageV2, + AccountKindFileStorage, + AccountKindBlockBlobStorage, + } +} + +type ArchiveStatus string + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCool, + ArchiveStatusRehydratePendingToHot, + } +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus string + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return []BlobGeoReplicationStatus{ + BlobGeoReplicationStatusLive, + BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusUnavailable, + } +} + +type BlobType string + +const ( + BlobTypeBlockBlob BlobType = "BlockBlob" + BlobTypePageBlob BlobType = "PageBlob" + BlobTypeAppendBlob BlobType = "AppendBlob" +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{ + BlobTypeBlockBlob, + BlobTypePageBlob, + BlobTypeAppendBlob, + } +} + +type BlockListType string + +const ( + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" + BlockListTypeAll BlockListType = "all" +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{ + BlockListTypeCommitted, + BlockListTypeUncommitted, + BlockListTypeAll, + } +} + +type CopyStatusType string + +const ( + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. 
+func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypePending, + CopyStatusTypeSuccess, + CopyStatusTypeAborted, + CopyStatusTypeFailed, + } +} + +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeOnly, + } +} + +type DeleteType string + +const ( + DeleteTypeNone DeleteType = "None" + DeleteTypePermanent DeleteType = "Permanent" +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return []DeleteType{ + DeleteTypeNone, + DeleteTypePermanent, + } +} + +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeNone, + EncryptionAlgorithmTypeAES256, + } +} + +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type ImmutabilityPolicyMode string + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeMutable, + ImmutabilityPolicyModeUnlocked, + ImmutabilityPolicyModeLocked, + } +} + +type ImmutabilityPolicySetting string + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return []ImmutabilityPolicySetting{ + ImmutabilityPolicySettingUnlocked, + ImmutabilityPolicySettingLocked, + } +} + +type LeaseDurationType string + +const ( + LeaseDurationTypeInfinite LeaseDurationType = "infinite" + LeaseDurationTypeFixed LeaseDurationType = "fixed" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. 
+func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeInfinite, + LeaseDurationTypeFixed, + } +} + +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeLeased LeaseStateType = "leased" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeLeased, + LeaseStateTypeExpired, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + } +} + +type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. +func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, + ListBlobsIncludeItemDeletedwithversions, + } +} + +type ListContainersIncludeType string + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" + ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. 
+func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ + ListContainersIncludeTypeMetadata, + ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeSystem, + } +} + +type PremiumPageBlobAccessTier string + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return []PremiumPageBlobAccessTier{ + PremiumPageBlobAccessTierP10, + PremiumPageBlobAccessTierP15, + PremiumPageBlobAccessTierP20, + PremiumPageBlobAccessTierP30, + PremiumPageBlobAccessTierP4, + PremiumPageBlobAccessTierP40, + PremiumPageBlobAccessTierP50, + PremiumPageBlobAccessTierP6, + PremiumPageBlobAccessTierP60, + PremiumPageBlobAccessTierP70, + PremiumPageBlobAccessTierP80, + } +} + +type PublicAccessType string + +const ( + PublicAccessTypeBlob PublicAccessType = "blob" + PublicAccessTypeContainer PublicAccessType = "container" +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{ + PublicAccessTypeBlob, + PublicAccessTypeContainer, + } +} + +// QueryFormatType - The quick query format type. +type QueryFormatType string + +const ( + QueryFormatTypeDelimited QueryFormatType = "delimited" + QueryFormatTypeJSON QueryFormatType = "json" + QueryFormatTypeArrow QueryFormatType = "arrow" + QueryFormatTypeParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{ + QueryFormatTypeDelimited, + QueryFormatTypeJSON, + QueryFormatTypeArrow, + QueryFormatTypeParquet, + } +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority string + +const ( + RehydratePriorityHigh RehydratePriority = "High" + RehydratePriorityStandard RehydratePriority = "Standard" +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return []RehydratePriority{ + RehydratePriorityHigh, + RehydratePriorityStandard, + } +} + +type SKUName string + +const ( + SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardRAGRS SKUName = "Standard_RAGRS" + SKUNameStandardZRS SKUName = "Standard_ZRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. 
+func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNameStandardLRS, + SKUNameStandardGRS, + SKUNameStandardRAGRS, + SKUNameStandardZRS, + SKUNamePremiumLRS, + } +} + +type SequenceNumberActionType string + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = "max" + SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{ + SequenceNumberActionTypeMax, + SequenceNumberActionTypeUpdate, + SequenceNumberActionTypeIncrement, + } +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" + StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" + StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" + StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" + StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" + StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" + StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" + StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" + StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" + StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" + StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + 
StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" + StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" + StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" + StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" + StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" + StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" + StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode = "InvalidVersionForPageBlobOperation" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" + 
StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = "SequenceNumberConditionNotMet" + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" + StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" + StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// 
PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. +func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAppendPositionConditionNotMet, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeBlobAlreadyExists, + StorageErrorCodeBlobArchived, + StorageErrorCodeBlobBeingRehydrated, + StorageErrorCodeBlobImmutableDueToPolicy, + StorageErrorCodeBlobNotArchived, + StorageErrorCodeBlobNotFound, + StorageErrorCodeBlobOverwritten, + StorageErrorCodeBlobTierInadequateForContentLength, + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, + StorageErrorCodeBlockCountExceedsLimit, + StorageErrorCodeBlockListTooLong, + StorageErrorCodeCannotChangeToLowerTier, + StorageErrorCodeCannotVerifyCopySource, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerAlreadyExists, + StorageErrorCodeContainerBeingDeleted, + StorageErrorCodeContainerDisabled, + StorageErrorCodeContainerNotFound, + StorageErrorCodeContentLengthLargerThanTierLimit, + StorageErrorCodeCopyAcrossAccountsNotSupported, + StorageErrorCodeCopyIDMismatch, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeIncrementalCopyBlobMismatch, + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + 
StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go new file mode 100644 index 000000000000..31e671f14849 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -0,0 +1,1456 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use NewContainerClient() instead. +type ContainerClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { + client := &ContainerClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 +// options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + if options != nil && options.Duration != nil { + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} + } + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. +// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options, containerCpkScopeInfo) + if err != nil { + return ContainerClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCpkScopeInfo.DefaultEncryptionScope} + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may +// be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. +func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. 
+func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. +func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { + result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { + result := ContainerClientGetPropertiesResponse{} + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { + hasImmutabilityPolicy, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasImmutabilityPolicy = &hasImmutabilityPolicy + } + if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { + hasLegalHold, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasLegalHold = &hasLegalHold + } + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return 
ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + } + return result, nil +} + +// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment +// method. +// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. +func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. +func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { + result := ContainerClientListBlobFlatSegmentResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + return result, nil +} + +// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 +// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. +// options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment +// method. +func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { + return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ + More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) + } + return client.ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. +func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("delimiter", delimiter) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. 
+func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { + result := ContainerClientListBlobHierarchySegmentResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. +// options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { + result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Rename - Renames an existing container. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceContainerName - Required. Specifies the name of the container to rename. +// options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + req, err := client.renameCreateRequest(ctx, sourceContainerName, options) + if err != nil { + return ContainerClientRenameResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientRenameResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + } + return client.renameHandleResponse(resp) +} + +// renameCreateRequest creates the Rename request. 
+func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "rename") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} + if options != nil && options.SourceLeaseID != nil { + req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renameHandleResponse handles the Rename response. +func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { + result := ContainerClientRenameResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenameResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// leaseID - Specifies the current lease ID on the resource. +// options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. 
+func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. +func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { + result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Restore - Restores a previously-deleted container. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + } + return client.restoreHandleResponse(resp) +} + +// restoreCreateRequest creates the Restore request. 
+func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeletedContainerName != nil { + req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} + } + if options != nil && options.DeletedContainerVersion != nil { + req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// restoreHandleResponse handles the Restore response. +func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { + result := ContainerClientRestoreResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container +// may be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// containerACL - the acls for the container +// options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. 
+func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. +func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { + result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { + result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// multipartContentType - Required. 
The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { + result := ContainerClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go new file mode 100644 index 000000000000..f8df7338b2f8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -0,0 +1,1742 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. +type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. 
+ Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. 
+ DeleteType *DeleteType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. 
+type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. 
+ BlobContentType *string +} + +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobPropertiesInternal `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + AccessTier *AccessTier `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. + EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. 
+ RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. + Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. 
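+ //
+ // Illustrative sketch only (the client call shape below is assumed, not defined in
+ // this file): each response carries NextMarker, which is fed back in as Marker to
+ // fetch the next page until the service returns an empty marker.
+ //
+ //	max := int32(1000)
+ //	opts := ContainerClientListBlobFlatSegmentOptions{Maxresults: &max}
+ //	for {
+ //		resp, err := client.ListBlobFlatSegment(ctx, &opts) // hypothetical call
+ //		if err != nil {
+ //			break // handle the error in real code
+ //		}
+ //		// consume resp.Segment.BlobItems here
+ //		if resp.NextMarker == nil || *resp.NextMarker == "" {
+ //			break
+ //		}
+ //		opts.Marker = resp.NextMarker
+ //	}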
+ Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // A lease ID for the source path. 
If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. 
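+ //
+ // Illustrative sketch (assumed usage): metadata keys must follow C# identifier
+ // naming rules, values are free-form strings.
+ //
+ //	opts := ContainerClientSetMetadataOptions{
+ //		Metadata: map[string]string{"department": "finance", "environment": "prod"},
+ //	}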
+ Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCpkScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *ContainerProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. 
(comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// CpkInfo contains a group of parameters for the BlobClient.Download method. +type CpkInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CpkScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. +type DelimitedTextConfiguration struct { + // The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + + // The string used as an escape character. + EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. 
All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. 
+ IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // parquet configuration + ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // CONSTANT; Required. The type of the provided query expression. + // Field has constant value "SQL", any specified value is ignored. 
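+ //
+ // Illustrative sketch (assumed usage): the service only accepts SQL expressions,
+ // so QueryType is effectively fixed.
+ //
+ //	queryType, expr := "SQL", "SELECT * FROM BlobStorage"
+ //	req := QueryRequest{QueryType: &queryType, Expression: &expr}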
+ QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Filters the results to return only to return only blobs whose tags match the specified expression. + Where *string +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
+type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + Cors []*CorsRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. 
Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. + SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go new file mode 100644 index 000000000000..770e41a080bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -0,0 +1,481 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. 
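+ //
+ // Descriptive note (not generated): the local `type alias AccessPolicy` declaration
+ // carries no methods, so decoding into the embedded *alias cannot recurse back into
+ // this UnmarshalXML; the aux struct overrides Expiry and Start with timeRFC3339
+ // wrappers that parse the RFC 3339 wire format before the values are copied back
+ // as *time.Time. The other Marshal/Unmarshal helpers in this file follow the same
+ // alias-plus-aux pattern.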
+func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. +func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. +func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobItemInternal + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. 
+func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. +func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. 
+func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. +func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. +func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. +func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*timeRFC1123)(c.DeletedTime), + LastModified: (*timeRFC1123)(c.LastModified), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. +func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.DeletedTime = (*time.Time)(aux.DeletedTime) + c.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
+func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. +func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return e.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + Cors *[]*CorsRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.Cors != nil { + aux.Cors = &s.Cors + } + return e.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. +func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*timeRFC3339)(u.SignedExpiry), + SignedStart: (*timeRFC3339)(u.SignedStart), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. 
+func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + u.SignedStart = (*time.Time)(aux.SignedStart) + return nil +} + +func populate(m map[string]interface{}, k string, v interface{}) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v interface{}) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go new file mode 100644 index 000000000000..2747e4081208 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -0,0 +1,1287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// PageBlobClient contains the methods for the PageBlob group. +// Don't use this type directly, use NewPageBlobClient() instead. +type PageBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { + client := &PageBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + } + return client.clearPagesHandleResponse(resp) +} + +// clearPagesCreateRequest creates the ClearPages request. +func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { + result := PageBlobClientClearPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// copySource - Specifies the name of the source page blob snapshot. 
This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + } + return client.copyIncrementalHandleResponse(resp) +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyIncrementalHandleResponse handles the CopyIncremental response. 
+func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { + result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Create - The Create operation creates a new page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + req.Raw().Header["x-ms-meta-"+k] = []string{v} + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil 
&& modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { + result := PageBlobClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot +// of a page blob +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2020-10-02 +// options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ + More: func(page PageBlobClientGetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return client.GetPageRangesHandleResponse(resp) + }, + }) +} + +// GetPageRangesCreateRequest creates the GetPageRanges request. +func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesHandleResponse handles the GetPageRanges response. +func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { + result := PageBlobClientGetPageRangesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that +// were changed between target blob and previous snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ + More: func(page PageBlobClientGetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + } + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) + } + return client.GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request. +func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.PrevSnapshotURL != nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = 
[]string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. +func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { + result := PageBlobClientGetPageRangesDiffResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + } + return client.resizeHandleResponse(resp) +} + +// resizeCreateRequest creates the Resize request. +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + 
return req, nil +} + +// resizeHandleResponse handles the Resize response. +func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { + result := PageBlobClientResizeResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + } + return client.updateSequenceNumberHandleResponse(resp) +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
+func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
+func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { + result := PageBlobClientUpdateSequenceNumberResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// body - Initial data +// options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesHandleResponse(resp) +} + +// uploadPagesCreateRequest creates the UploadPages request. 
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// sourceURL - Specify a URL to the copy source. +// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// contentLength - The length of the request. +// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. 
+// options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go new file mode 100644 index 000000000000..3d2d5d2f953e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -0,0 +1,1972 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. 
+ ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. 
+ ObjectReplicationRules map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. 
+ ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { + BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { + BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. 
+ Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.ListBlobFlatSegment. +type ContainerClientListBlobFlatSegmentResponse struct { + ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.ListBlobHierarchySegment. +type ContainerClientListBlobHierarchySegmentResponse struct { + ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. +type ContainerClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. 
+type ContainerClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.GetPageRangesDiff. +type PageBlobClientGetPageRangesDiffResponse struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.GetPageRanges. +type PageBlobClientGetPageRangesResponse struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
+ EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { + FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { + StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type ServiceClientGetUserDelegationKeyResponse struct { + UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.ListContainersSegment. +type ServiceClientListContainersSegmentResponse struct { + ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go new file mode 100644 index 000000000000..1cb779d84812 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -0,0 +1,551 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search +// expression. Filter blobs searches across all containers within a storage account but can +// be scoped within the expression to a single container. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + req, err := client.filterBlobsCreateRequest(ctx, options) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + } + return client.filterBlobsHandleResponse(resp) +} + +// filterBlobsCreateRequest creates the FilterBlobs request. +func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Where != nil { + reqQP.Set("where", *options.Where) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. 
+func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { + result := ServiceClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { + result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + return result, nil +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + } + return client.getStatisticsHandleResponse(resp) +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. +func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. 
This is only a valid operation when using +// bearer token authentication. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// keyInfo - Key information +// options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey +// method. +func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getUserDelegationKeyHandleResponse(resp) +} + +// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. +func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "userdelegationkey") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, keyInfo) +} + +// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. +func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) { + result := ServiceClientGetUserDelegationKeyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + return result, nil +} + +// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified +// account +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment +// method. +// listContainersSegmentCreateRequest creates the ListContainersSegment request. 
+func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listContainersSegmentHandleResponse handles the ListContainersSegment response. +func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) { + result := ServiceClientListContainersSegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { + return ServiceClientListContainersSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// storageServiceProperties - The StorageService properties. +// options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. 
+func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, storageServiceProperties) +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2020-10-02 +// contentLength - The length of the request. +// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. 
+func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(body, "application/xml") +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { + result := ServiceClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go new file mode 100644 index 000000000000..4b4d51aa3994 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go new file mode 100644 index 000000000000..1ce9d621164e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -0,0 +1,59 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go new file mode 100644 index 000000000000..144ea18e1aba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/xml" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. +func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go new file mode 100644 index 000000000000..a86fc582c52f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -0,0 +1,78 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "context" + "errors" +) + +// BatchTransferOptions identifies options used by doBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Concurrency uint16 + Operation func(offset int64, chunkSize int64, ctx context.Context) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency + } + + // Prepare and do parallel operations. + numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Concurrency; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + + operationChannel <- func() error { + return o.Operation(offset, curChunkSize, ctx) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go new file mode 100644 index 000000000000..8d4d35bdeffd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
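`DoBatchTransfer` above is the helper that splits a transfer into fixed-size chunks and runs them on a bounded pool of goroutines, cancelling the rest on the first failure. Because `internal/shared` cannot be imported from outside the module, the sketch below is illustrative only, written as if it lived inside the package; the source and destination buffers and the operation name are placeholders.

```go
// Illustrative only: written as if inside internal/shared; src/dst are placeholders.
package shared

import "context"

func exampleDoBatchTransfer() error {
	src := make([]byte, 5*512*1024) // 2.5 MiB of source data
	dst := make([]byte, len(src))

	return DoBatchTransfer(context.Background(), &BatchTransferOptions{
		TransferSize:  int64(len(src)),
		ChunkSize:     512 * 1024, // five chunks
		Concurrency:   4,
		OperationName: "exampleCopy",
		// Note the parameter order used by this package: offset, chunkSize, ctx.
		Operation: func(offset int64, chunkSize int64, ctx context.Context) error {
			copy(dst[offset:offset+chunkSize], src[offset:offset+chunkSize])
			return nil
		},
	})
}
```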
+ +package shared + +import ( + "errors" +) + +type bytesWriter []byte + +func NewBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go new file mode 100644 index 000000000000..c8528a2e3ed2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go new file mode 100644 index 000000000000..48a5ad842aec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -0,0 +1,238 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "fmt" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" +) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. 
The expected connection string " + + "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +type ParsedConnectionString struct { + ServiceURL string + AccountName string + AccountKey string +} + +func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { + const ( + defaultScheme = "https" + defaultSuffix = "core.windows.net" + ) + + connStrMap := make(map[string]string) + connectionString = strings.TrimRight(connectionString, ";") + + splitString := strings.Split(connectionString, ";") + if len(splitString) == 0 { + return ParsedConnectionString{}, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ParsedConnectionString{}, errConnectionString + } + connStrMap[parts[0]] = parts[1] + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } + + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") + } + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), + }, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + return ParsedConnectionString{ + ServiceURL: blobEndpoint, + AccountName: accountName, + AccountKey: accountKey, + }, nil + } + + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), + AccountName: accountName, + AccountKey: accountKey, + }, nil +} + +// SerializeBlobTags converts tags to generated.BlobTags +func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { + if tagsMap == nil { + return nil + } + blobTagSet := make([]*generated.BlobTag, 0) + for key, val := range tagsMap { + newKey, newVal := key, val + blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: &newVal}) + } + return &generated.BlobTags{BlobTagSet: blobTagSet} +} + +func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { + if tagsMap == nil { + return nil + } + tags := make([]string, 0) + for key, val := range tagsMap { + tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) + } + blobTagsString := strings.Join(tags, "&") + return &blobTagsString +} + +func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { + if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long + return 0, nil + } + + err := validateSeekableStreamAt0(body) + if err != nil { + return 0, err + } + + count, err := body.Seek(0, io.SeekEnd) + if err != nil { + return 0, errors.New("body stream must be seekable") + } + + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + return count, nil +} + +// return an error if body is not a valid seekable stream at 0 +func validateSeekableStreamAt0(body io.ReadSeeker) error { + if body == nil { // nil body's are "logically" seekable to 0 + return nil + } + if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != 
nil { + // Help detect programmer error + if err != nil { + return errors.New("body stream must be seekable") + } + return errors.New("body stream must be set to position 0") + } + return nil +} + +func RangeToString(offset, count int64) string { + return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) +} + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +func GenerateLeaseID(leaseID *string) (*string, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return leaseID, nil +} + +func GetClientOptions[T any](o *T) *T { + if o == nil { + return new(T) + } + return o +} + +// IsIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: +// http(s)://IP(:port)/storageaccount/container/... +// As url's Host property, host could be both host or host:port +func IsIPEndpointStyle(host string) bool { + if host == "" { + return false + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + // For IPv6, there could be case where SplitHostPort fails for cannot finding port. + // In this case, eliminate the '[' and ']' in the URL. + // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go new file mode 100644 index 000000000000..f3c9d4be7e01 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "sync" +) + +const _1MiB = 1024 * 1024 + +// TransferManager provides a buffer and thread pool manager for certain transfer options. +// It is undefined behavior if code outside this package call any of these methods. +type TransferManager interface { + // Get provides a buffer that will be used to read data into and write out to the stream. + // It is guaranteed by this package to not read or write beyond the size of the slice. + Get() []byte + + // Put may or may not put the buffer into underlying storage, depending on settings. + // The buffer must not be touched after this has been called. + Put(b []byte) // nolint + + // Run will use a goroutine pool entry to run a function. This blocks until a pool + // goroutine becomes available. + Run(func()) + + // Close shuts down all internal goroutines. This must be called when the TransferManager + // will no longer be used. Not closing it will cause a goroutine leak. 
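For the connection-string parser in `shared.go` above, a short in-package sketch (illustrative only; account names, keys, and the SAS value are placeholders) of the two accepted shapes and the service URLs they produce:

```go
// Illustrative only: written as if inside internal/shared; credentials are placeholders.
package shared

import "fmt"

func exampleParseConnectionString() {
	// Account-key form: the service URL is derived from the account name,
	// protocol, and endpoint suffix, and the key is returned alongside it.
	p, err := ParseConnectionString(
		"DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net")
	fmt.Println(p.ServiceURL, p.AccountName, err) // https://myaccount.blob.core.windows.net myaccount <nil>

	// SAS form: the signature becomes the URL query and only ServiceURL is populated.
	p, err = ParseConnectionString(
		"AccountName=myaccount;SharedAccessSignature=sv=2020-10-02&ss=b&sig=REDACTED")
	fmt.Println(p.ServiceURL, err) // https://myaccount.blob.core.windows.net/?sv=2020-10-02&ss=b&sig=REDACTED <nil>
}
```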
+ Close() +} + +// --------------------------------------------------------------------------------------------------------------------- + +type staticBuffer struct { + buffers chan []byte + size int + threadpool chan func() +} + +// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer +// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This +// can be shared between calls if you wish to control maximum memory and concurrency with +// multiple concurrent calls. +func NewStaticBuffer(size, max int) (TransferManager, error) { + if size < 1 || max < 1 { + return nil, fmt.Errorf("cannot be called with size or max set to < 1") + } + + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") + } + + threadpool := make(chan func(), max) + buffers := make(chan []byte, max) + for i := 0; i < max; i++ { + go func() { + for f := range threadpool { + f() + } + }() + + buffers <- make([]byte, size) + } + return staticBuffer{ + buffers: buffers, + size: size, + threadpool: threadpool, + }, nil +} + +// Get implements TransferManager.Get(). +func (s staticBuffer) Get() []byte { + return <-s.buffers +} + +// Put implements TransferManager.Put(). +func (s staticBuffer) Put(b []byte) { // nolint + select { + case s.buffers <- b: + default: // This shouldn't happen, but just in case they call Put() with there own buffer. + } +} + +// Run implements TransferManager.Run(). +func (s staticBuffer) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). +func (s staticBuffer) Close() { + close(s.threadpool) + close(s.buffers) +} + +// --------------------------------------------------------------------------------------------------------------------- + +type syncPool struct { + threadpool chan func() + pool sync.Pool +} + +// NewSyncPool creates a TransferManager that will use a sync.Pool +// that can hold a non-capped number of buffers constrained by concurrency. This +// can be shared between calls if you wish to share memory and concurrency. +func NewSyncPool(size, concurrency int) (TransferManager, error) { + if size < 1 || concurrency < 1 { + return nil, fmt.Errorf("cannot be called with size or max set to < 1") + } + + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") + } + + threadpool := make(chan func(), concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for f := range threadpool { + f() + } + }() + } + + return &syncPool{ + threadpool: threadpool, + pool: sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + }, + }, nil +} + +// Get implements TransferManager.Get(). +func (s *syncPool) Get() []byte { + return s.pool.Get().([]byte) +} + +// Put implements TransferManager.Put(). +// nolint +func (s *syncPool) Put(b []byte) { + s.pool.Put(b) +} + +// Run implements TransferManager.Run(). +func (s *syncPool) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). 
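The `TransferManager` implementations above follow a get/run/put/close lifecycle. A hypothetical in-package sketch of that lifecycle using `NewStaticBuffer` (sizes are illustrative; note that `Close` should only run after all scheduled work has returned its buffers):

```go
// Hypothetical lifecycle sketch, written as if inside internal/shared; sizes are illustrative.
package shared

func exampleStaticBuffer() error {
	tm, err := NewStaticBuffer(_1MiB, 4) // four 1 MiB buffers and four pool goroutines
	if err != nil {
		return err
	}

	done := make(chan struct{})
	buf := tm.Get() // blocks until one of the four buffers is available
	tm.Run(func() {
		// ... fill or drain buf here ...
		tm.Put(buf) // hand the buffer back for reuse
		close(done)
	})
	<-done

	tm.Close() // only after all scheduled work is finished; this shuts the pool down
	return nil
}
```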
+func (s *syncPool) Close() { + close(s.threadpool) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md new file mode 100644 index 000000000000..1b1a4b45d54f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md @@ -0,0 +1,76 @@ +# Guide to migrate from `azure-storage-blob-go` to `azblob` + +This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module. + +## Simplified API surface area + +The redesign of the `azblob` module separates clients into various sub-packages. +In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package. +This made it difficult to navigate the public surface area. + +## Clients + +In `azure-storage-blob-go` a client constructor always requires a `url.URL` and `Pipeline` parameters. + +In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options. + +```go +// new code +client, err := azblob.NewClient("", cred, nil) +``` + +## Authentication + +In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor. + +In `azblob`, you pass the required credential directly to the client constructor. + +```go +// new code. cred is an AAD token credential created from the azidentity module +client, err := azblob.NewClient("", cred, nil) +``` + +The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`. +This been replaced by using Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme). + +Authentication with a shared key via `NewSharedKeyCredential` remains unchanged. + +In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication. + +In `azblob` you use the construtor `NewClientWithNoCredential()` instead. + +```go +// new code +client, err := azblob.NewClientWithNoCredential("", nil) +``` + +## Listing blobs/containers + +In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)). + +In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`. + +```go +// new code +pager := client.NewListBlobsFlatPager("my-container", nil) +for pager.More() { + page, err := pager.NextPage(context.TODO()) + // process results +} +``` + +## Configuring the HTTP pipeline + +In `azure-storage-blob-go` you explicitly created a HTTP pipeline with configuration before creating a client. +This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)). + +In `azblob` a HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type. 
+ +```go +// new code +client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + // configure HTTP pipeline options here + }, +}) +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go new file mode 100644 index 000000000000..099f48d17217 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerOptions contains the optional parameters for the ContainerClient.Create method. +type CreateContainerOptions = service.CreateContainerOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = service.DeleteContainerOptions + +// DeleteBlobOptions contains the optional parameters for the Client.Delete method. +type DeleteBlobOptions = blob.DeleteOptions + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions = blob.DownloadStreamOptions + +// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. +type ListBlobsFlatOptions = container.ListBlobsFlatOptions + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude = container.ListBlobsInclude + +// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation +type ListContainersOptions = service.ListContainersOptions + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = blockblob.UploadBufferOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = blockblob.UploadFileOptions + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions = blockblob.UploadStreamOptions + +// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadBufferOptions = blob.DownloadBufferOptions + +// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadFileOptions = blob.DownloadFileOptions + +// CpkInfo contains a group of parameters for client provided encryption key. +type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CpkScopeInfo = generated.ContainerCpkScopeInfo + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// ListContainersInclude indicates what additional information the service should return with each container. 
+type ListContainersInclude = service.ListContainersInclude + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy = blob.ObjectReplicationPolicy + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions = blob.RetryReaderOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go new file mode 100644 index 000000000000..e210a76e6e68 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -0,0 +1,403 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "context" + "io" + "net/http" + "net/url" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a client to an Azure Storage page blob; +type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt +// - cred - a SharedKeyCredential created with the matching blob's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - blobName - the name of the blob within the container +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (pb *Client) generated() *generated.PageBlobClient { + _, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) + return pageBlob +} + +// URL returns the URL endpoint used by the Client object. +func (pb *Client) URL() string { + return pb.generated().Endpoint() +} + +// BlobClient returns the embedded blob client for this AppendBlob client. +func (pb *Client) BlobClient() *blob.Client { + innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) + return (*blob.Client)(innerBlob) +} + +func (pb *Client) sharedKey() *blob.SharedKeyCredential { + return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) +} + +// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { + p, err := blob.ParseURL(pb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil +} + +// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the version returning a URL to the base blob. +func (pb *Client) WithVersionID(versionID string) (*Client, error) { + p, err := blob.ParseURL(pb.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil +} + +// Create creates a page blob of the specified length. 
Call PutPage to upload data to a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) { + createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + + resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders, + leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + return resp, err +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) { + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + + if err != nil { + return UploadPagesResponse{}, err + } + + uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, + cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. +// The sourceOffset specifies the start offset of source data to copy from. +// The destOffset specifies the start offset of data in page blob will be written to. +// The count must be a multiple of 512 bytes. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. +func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, + o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) { + + uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, + modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0, + shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, + sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) { + clearOptions := &generated.PageBlobClientClearPagesOptions{ + Range: exported.FormatHTTPRange(rnge), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.generated().ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, + cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// NewGetPageRangesPager returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. 
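Putting the page blob constructors and write methods above together, a hedged usage sketch follows. The connection string, container, and blob names are placeholders, and it assumes `blob.HTTPRange`'s `Offset`/`Count` fields and `streaming.NopCloser` from `azcore`; the point is that sizes and write ranges must be multiples of `pageblob.PageBytes` (512).

```go
// Hedged usage sketch; the connection string, container, and blob names are placeholders.
package main

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func main() {
	ctx := context.Background()

	client, err := pageblob.NewClientFromConnectionString("<connection string>", "mycontainer", "my.vhd", nil)
	if err != nil {
		panic(err)
	}

	// Page blobs are sized and written in 512-byte (pageblob.PageBytes) units.
	if _, err := client.Create(ctx, 2*pageblob.PageBytes, nil); err != nil {
		panic(err)
	}

	// Upload one page at offset 0; the body must be a seekable stream at position 0.
	page := make([]byte, pageblob.PageBytes)
	_, err = client.UploadPages(ctx, streaming.NopCloser(bytes.NewReader(page)), &pageblob.UploadPagesOptions{
		Range: blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes},
	})
	if err != nil {
		panic(err)
	}
}
```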
+func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{ + More: func(page GetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesHandleResponse(resp) + }, + }) +} + +// NewGetPageRangesDiffPager gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{ + More: func(page GetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesDiffResponse{}, err + } + resp, err := pb.generated().Pipeline().Do(req) + if err != nil { + return GetPageRangesDiffResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) { + resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := pb.generated().Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return resp, err +} + +// UpdateSequenceNumber sets the page blob's sequence number. 
+func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) { + actionType, updateOptions, lac, mac := options.format() + resp, err := pb.generated().UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) + + return resp, err +} + +// StartCopyIncremental begins an operation to start an incremental copy from one-page blob's snapshot to this page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and +// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. +func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) { + copySourceURL, err := url.Parse(copySource) + if err != nil { + return CopyIncrementalResponse{}, err + } + + queryParams := copySourceURL.Query() + queryParams.Set("snapshot", prevSnapshot) + copySourceURL.RawQuery = queryParams.Encode() + + pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format() + resp, err := pb.generated().CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions) + + return resp, err +} + +// Redeclared APIs + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return pb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return pb.BlobClient().Undelete(ctx, o) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return pb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return pb.BlobClient().GetProperties(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return pb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return pb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return pb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return pb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return pb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return pb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return pb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
+func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return pb.BlobClient().DownloadStream(ctx, o) +} + +// DownloadBuffer downloads an Azure blob to a buffer with parallel. +func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { + return pb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) +} + +// DownloadFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { + return pb.BlobClient().DownloadFile(ctx, file, o) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go new file mode 100644 index 000000000000..646587cec8d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const ( + // PageBytes indicates the number of bytes in a page (512). + PageBytes = 512 +) + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier +type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10 + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15 + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20 + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30 + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4 + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40 + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50 + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6 + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60 + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70 + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80 +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. 
+func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return generated.PossiblePremiumPageBlobAccessTierValues() +} + +// SequenceNumberActionType defines values for SequenceNumberActionType +type SequenceNumberActionType = generated.SequenceNumberActionType + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = generated.SequenceNumberActionTypeMax + SequenceNumberActionTypeUpdate SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate + SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return generated.PossibleSequenceNumberActionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go new file mode 100644 index 000000000000..c1b1194ff83b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -0,0 +1,338 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// Type Declarations --------------------------------------------------------------------- + +// PageList - the list of pages +type PageList = generated.PageList + +// ClearRange defines a range of pages. +type ClearRange = generated.ClearRange + +// PageRange defines a range of pages. +type PageRange = generated.PageRange + +// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method. +type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + SequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string + + // Optional. Indicates the tier to be set on the page blob. 
+ Tier *PremiumPageBlobAccessTier + + HTTPHeaders *blob.HTTPHeaders + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + AccessConditions *blob.AccessConditions + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool +} + +func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.PageBlobClientCreateOptions{ + BlobSequenceNumber: o.SequenceNumber, + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadPagesOptions contains the optional parameters for the Client.UploadPages method. +type UploadPagesOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + AccessConditions *blob.AccessConditions +} + +func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.PageBlobClientUploadPagesOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + Range: exported.FormatHTTPRange(o.Range), + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method. +type UploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
+ SourceContentCRC64 []byte + + CpkInfo *blob.CpkInfo + + CpkScopeInfo *blob.CpkScopeInfo + + SequenceNumberAccessConditions *SequenceNumberAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions +} + +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, + *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &generated.PageBlobClientUploadPagesFromURLOptions{ + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + CopySourceAuthorization: o.CopySourceAuthorization, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ClearPagesOptions contains the optional parameters for the Client.ClearPages operation +type ClearPagesOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + AccessConditions *blob.AccessConditions +} + +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo, + *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. +type GetPageRangesOptions struct { + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. 
Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + AccessConditions *blob.AccessConditions +} + +func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Range: exported.FormatHTTPRange(o.Range), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method. +type GetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + AccessConditions *blob.AccessConditions +} + +func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.PageBlobClientGetPageRangesDiffOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + PrevSnapshotURL: o.PrevSnapshotURL, + Prevsnapshot: o.PrevSnapshot, + Range: exported.FormatHTTPRange(o.Range), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions + +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ResizeOptions contains the optional parameters for the Client.Resize method. +type ResizeOptions struct { + CpkInfo *blob.CpkInfo + CpkScopeInfo *blob.CpkScopeInfo + AccessConditions *blob.AccessConditions +} + +func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, + *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method. +type UpdateSequenceNumberOptions struct { + ActionType *SequenceNumberActionType + + SequenceNumber *int64 + + AccessConditions *blob.AccessConditions +} + +func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions, + *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &generated.PageBlobClientUpdateSequenceNumberOptions{ + BlobSequenceNumber: o.SequenceNumber, + } + + if *o.ActionType == SequenceNumberActionTypeIncrement { + options.BlobSequenceNumber = nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyIncrementalOptions contains the optional parameters for the Client.StartCopyIncremental method. 
+type CopyIncrementalOptions struct { + ModifiedAccessConditions *blob.ModifiedAccessConditions +} + +func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go new file mode 100644 index 000000000000..876efbab1d01 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.PageBlobClientCreateResponse + +// UploadPagesResponse contains the response from method Client.UploadPages. +type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse + +// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL. +type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse + +// ClearPagesResponse contains the response from method Client.ClearPages. +type ClearPagesResponse = generated.PageBlobClientClearPagesResponse + +// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager. +type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse + +// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager. +type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.PageBlobClientResizeResponse + +// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber. +type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse + +// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental. +type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go new file mode 100644 index 000000000000..86b05d098f43 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerResponse contains the response from method container.Client.Create. 
+type CreateContainerResponse = service.CreateContainerResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = service.DeleteContainerResponse + +// DeleteBlobResponse contains the response from method blob.Client.Delete. +type DeleteBlobResponse = blob.DeleteResponse + +// UploadResponse contains the response from method blockblob.Client.CommitBlockList. +type UploadResponse = blockblob.CommitBlockListResponse + +// DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. +type DownloadStreamResponse = blob.DownloadStreamResponse + +// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = container.ListBlobsFlatResponse + +// ListContainersResponse contains the response from method service.Client.ListContainersSegment. +type ListContainersResponse = service.ListContainersResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = blockblob.UploadBufferResponse + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadFileResponse = blockblob.UploadFileResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. +type UploadStreamResponse = blockblob.CommitBlockListResponse + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse = generated.ListContainersSegmentResponse + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go new file mode 100644 index 000000000000..fa76ed95c416 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -0,0 +1,316 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() + ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. +func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + v.Services, + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: v.Services, + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationKey to sign this signature values to produce the proper SAS query parameters. 
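// Both signing paths return QueryParameters, whose Encode method yields the SAS query
// string. A minimal sketch (editorial illustration, not part of the upstream vendored
// file) using the shared-key variant; the user-delegation variant is analogous:
//
//	func accountSAS(cred *SharedKeyCredential) (string, error) {
//		qp, err := AccountSignatureValues{
//			Protocol:      ProtocolHTTPS,
//			ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
//			Permissions:   (&AccountPermissions{Read: true, List: true}).String(),
//			Services:      (&AccountServices{Blob: true}).String(),
//			ResourceTypes: (&AccountResourceTypes{Container: true, Object: true}).String(),
//		}.SignWithSharedKey(cred)
//		if err != nil {
//			return "", err
//		}
//		return qp.Encode(), nil
//	}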
+func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + exported.GetAccountName(userDelegationCredential), + v.Permissions, + v.Services, + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: v.Services, + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + udk := exported.GetUDKParams(userDelegationCredential) + + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +type AccountPermissions struct { + Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASPermissions' fields from a string. 
+func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 'x': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountServices type simplifies creating the services string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. +type AccountServices struct { + Blob, Queue, File bool +} + +// String produces the SAS services string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Services field. +func (s *AccountServices) String() string { + var buffer bytes.Buffer + if s.Blob { + buffer.WriteRune('b') + } + if s.Queue { + buffer.WriteRune('q') + } + if s.File { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASServices' fields from a string. +/*func parseAccountServices(str string) (AccountServices, error) { + s := AccountServices{} // Clear out the flags + for _, r := range str { + switch r { + case 'b': + s.Blob = true + case 'q': + s.Queue = true + case 'f': + s.File = true + default: + return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r) + } + } + return s, nil +}*/ + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// Parse initializes the AccountResourceTypes's fields from a string. +/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} // Clear out the flags + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r) + } + } + return rt, nil +}*/ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go new file mode 100644 index 000000000000..1a9c8c12d17b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -0,0 +1,440 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "errors" + "net" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const ( + TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 +) + +var ( + // Version is the default version encoded in the SAS token. + Version = "2020-02-10" +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). +func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { + ss := "" + if !startTime.IsZero() { + ss = formatTimeWithDefaultFormat(&startTime) + } + se := "" + if !expiryTime.IsZero() { + se = formatTimeWithDefaultFormat(&expiryTime) + } + sh := "" + if !snapshotTime.IsZero() { + sh = snapshotTime.Format(exported.SnapshotTimeFormat) + } + return ss, se, sh +} + +// formatTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". +func formatTimeWithDefaultFormat(t *time.Time) string { + return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// formatTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. +func formatTime(t *time.Time, format string) string { + if format != "" { + return t.Format(format) + } + return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// ParseTime try to parse a SAS time string. +func parseTime(val string) (t time.Time, timeFormat string, err error) { + for _, sasTimeFormat := range timeFormats { + t, err = time.Parse(sasTimeFormat, val) + if err == nil { + timeFormat = sasTimeFormat + break + } + } + + if err != nil { + err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") + } + + return +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. +func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + +// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters. +// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components +// to a query parameter map by calling AddToValues(). +// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. 
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectID string `param:"saoid"` + agentObjectID string `param:"suoid"` + correlationID string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID +func (p *QueryParameters) PreauthorizedAgentObjectID() string { + return p.preauthorizedAgentObjectID +} + +// AgentObjectID returns agentObjectID +func (p *QueryParameters) AgentObjectID() string { + return p.agentObjectID +} + +// SignedCorrelationID returns signedCorrelationID +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// SignedOID returns signedOID +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry +func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource +func (p *QueryParameters) Resource() string { + return p.resource +} + 
+// Permissions returns permissions +func (p *QueryParameters) Permissions() string { + return p.permissions +} + +// Signature returns signature +func (p *QueryParameters) Signature() string { + return p.signature +} + +// CacheControl returns cacheControl +func (p *QueryParameters) CacheControl() string { + return p.cacheControl +} + +// ContentDisposition returns contentDisposition +func (p *QueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +// ContentEncoding returns contentEncoding +func (p *QueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +// ContentLanguage returns contentLanguage +func (p *QueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +// ContentType returns sontentType +func (p *QueryParameters) ContentType() string { + return p.contentType +} + +// SignedDirectoryDepth returns signedDirectoryDepth +func (p *QueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *QueryParameters) Encode() string { + v := url.Values{} + + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOID != "" { + v.Add("skoid", p.signedOID) + v.Add("sktid", p.signedTID) + v.Add("skt", p.signedStart.Format(TimeFormat)) + v.Add("ske", p.signedExpiry.Format(TimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.preauthorizedAgentObjectID != "" { + v.Add("saoid", p.preauthorizedAgentObjectID) + } + if p.agentObjectID != "" { + v.Add("suoid", p.agentObjectID) + } + if p.correlationID != "" { + v.Add("scid", p.correlationID) + } + + return v.Encode() +} + +// NewQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. 
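// A short sketch (editorial illustration, not part of the upstream vendored file) of
// pulling SAS components back out of a URL's query string with this function and the
// getters above:
//
//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/cnt/blob?sv=2020-02-10&sp=r&sig=...")
//	vals := u.Query()
//	qp := NewQueryParameters(vals, true) // recognized SAS keys are removed from vals
//	_ = qp.Permissions()                 // "r"
//	_ = qp.Version()                     // "2020-02-10"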
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.preauthorizedAgentObjectID = val + case "suoid": + p.agentObjectID = val + case "scid": + p.correlationID = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go new file mode 100644 index 000000000000..98d853d4e9f8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -0,0 +1,425 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sas + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +type BlobSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // Use "" to create a Container SAS + Directory string // Not nil for a directory SAS (ie sr=d) + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + BlobVersion string // sr=bv + PreauthorizedAgentObjectId string + AgentObjectId string + CorrelationId string +} + +func getDirectoryDepth(path string) string { + if path == "" { + return "" + } + return fmt.Sprint(strings.Count(path, "/") + 1) +} + +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + if sharedKeyCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + } + + //Make sure the permission characters are in the correct order + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + signedIdentifier := v.Identifier + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, + 
agentObjectID: v.AgentObjectId, + correlationID: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + if userDelegationCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") + } + + //Make sure the permission characters are in the correct order + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + udk := exported.GetUDKParams(userDelegationCredential) + + udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) + //I don't like this answer to combining the functions + //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. + signedIdentifier := strings.Join([]string{ + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.PreauthorizedAgentObjectId, + v.AgentObjectId, + v.CorrelationId, + }, "\n") + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, + agentObjectID: v.AgentObjectId, + correlationID: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion + + return p, nil +} + +// getCanonicalName computes the canonical name for a 
container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool + Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p *ContainerPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions' fields from a string. +/*func parseContainerPermissions(s string) (ContainerPermissions, error) { + p := ContainerPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + default: + return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +}*/ + +// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSignatureValues's Permissions field. 
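// For example (editorial sketch, not part of the upstream vendored file), a read-only
// blob SAS signed with a shared key could be produced like this:
//
//	func blobSAS(cred *SharedKeyCredential, containerName, blobName string) (string, error) {
//		qp, err := BlobSignatureValues{
//			Protocol:      ProtocolHTTPS,
//			ExpiryTime:    time.Now().UTC().Add(time.Hour),
//			ContainerName: containerName,
//			BlobName:      blobName,
//			Permissions:   (&BlobPermissions{Read: true}).String(),
//		}.SignWithSharedKey(cred)
//		if err != nil {
//			return "", err
//		}
//		return qp.Encode(), nil
//	}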
+func (p *BlobPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.Tag { + b.WriteRune('t') + } + if p.List { + b.WriteRune('l') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. +func parseBlobPermissions(s string) (BlobPermissions, error) { + p := BlobPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 't': + p.Tag = true + case 'l': + p.List = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + default: + return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go new file mode 100644 index 000000000000..57fe053f07ae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + snapshot = "snapshot" + versionId = "versionid" +) + +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" + IPEndpointStyleInfo IPEndpointStyleInfo + ContainerName string // "" if no container + BlobName string // "" if no blob + Snapshot string // "" if not a snapshot + SAS QueryParameters + UnparsedParams string + VersionID string // "" if not versioning enabled +} + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. 
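// Sketch (editorial illustration, not part of the upstream vendored file):
//
//	parts, err := ParseURL("https://myaccount.blob.core.windows.net/mycontainer/dir/blob.txt?sv=...&sig=...")
//	// on success: parts.ContainerName == "mycontainer", parts.BlobName == "dir/blob.txt",
//	// and parts.SAS holds the recognized SAS query parameters.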
+func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err + } + + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the container & blob names (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if shared.IsIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + + up.Snapshot = "" // Assume no snapshot + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + up.VersionID = "" // Assume no versionID + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + + up.SAS = NewQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
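// Sketch (editorial illustration, not part of the upstream vendored file) of a
// parse/modify/rebuild round trip, assuming blobURL is a blob URL string:
//
//	parts, err := ParseURL(blobURL)
//	if err == nil {
//		parts.BlobName = "another-blob" // point at a different blob in the same container
//		rebuilt := parts.String()       // same SAS and unparsed query parameters
//		_ = rebuilt
//	}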
+func (up URLParts) String() string { + path := "" + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate container & blob names (if they exist) + if up.ContainerName != "" { + path += "/" + up.ContainerName + if up.BlobName != "" { + path += "/" + up.BlobName + } + } + + rawQuery := up.UnparsedParams + + //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { + up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) + } + + // Concatenate blob version id query parameter (if it exists) + if up.VersionID != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += versionId + "=" + up.VersionID + } + + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += snapshot + "=" + up.Snapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go new file mode 100644 index 000000000000..724d92b91c93 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -0,0 +1,287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "context" + "errors" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + +// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. +type Client base.Client[generated.ServiceClient] + +// NewClient creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.blob.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. +// OAuth is required for this call, as well as any role that can delegate access to the storage account. 
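// Sketch (editorial illustration, not part of the upstream vendored file), assuming
// svcClient is a *Client and keyInfo is a KeyInfo built by the caller; KeyInfo describes
// the key's validity window and is defined in this package's models, which are not
// shown in this diff:
//
//	udc, err := svcClient.GetUserDelegationCredential(ctx, keyInfo, nil)
//	if err == nil {
//		// udc can then be passed to sas.BlobSignatureValues.SignWithUserDelegation.
//		_ = udc
//	}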
+func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) { + url, err := blob.ParseURL(s.URL()) + if err != nil { + return nil, err + } + + getUserDelegationKeyOptions := o.format() + udk, err := s.generated().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions) + if err != nil { + return nil, err + } + + return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil +} + +func (s *Client) generated() *generated.ServiceClient { + return base.InnerClient((*base.Client[generated.ServiceClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ServiceClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. +func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of +// Client's URL. The new ContainerClient uses the same request policy pipeline as the Client. +// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's +// NewContainerClient method. +func (s *Client) NewContainerClient(containerName string) *container.Client { + containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey())) +} + +// CreateContainer is a lifecycle method to creates a new container under the specified account. +// If the container with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created container. +func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) { + containerClient := s.NewContainerClient(containerName) + containerCreateResp, err := containerClient.Create(ctx, options) + return containerCreateResp, err +} + +// DeleteContainer is a lifecycle method that marks the specified container for deletion. +// The container and any blobs contained within it are later deleted during garbage collection. +// If the container is not found, a ResourceNotFoundError will be raised. 
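// Sketch (editorial illustration, not part of the upstream vendored file), assuming
// svcClient is a *Client and nil options accept the defaults:
//
//	if _, err := svcClient.CreateContainer(ctx, "mycontainer", nil); err != nil {
//		return err
//	}
//	// ... use the container ...
//	if _, err := svcClient.DeleteContainer(ctx, "mycontainer", nil); err != nil {
//		return err
//	}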
+func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) { + containerClient := s.NewContainerClient(containerName) + containerDeleteResp, err := containerClient.Delete(ctx, options) + return containerDeleteResp, err +} + +// RestoreContainer restores soft-deleted container +// Operation will only be successful if used within the specified number of days set in the delete retention policy +func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) { + containerClient := s.NewContainerClient(deletedContainerName) + containerRestoreResp, err := containerClient.Restore(ctx, deletedContainerVersion, options) + return containerRestoreResp, err +} + +// GetAccountInfo provides account level information +func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions) + return resp, err +} + +// NewListContainersPager operation returns a pager of the containers under the specified account. +// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. +func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { + listOptions := generated.ServiceClientListContainersSegmentOptions{} + if o != nil { + if o.Include.Deleted { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted) + } + if o.Include.Metadata { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) + } + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{ + More: func(page ListContainersResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.Marker + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListContainersResponse{}, err + } + resp, err := s.generated().Pipeline().Do(req) + if err != nil { + return ListContainersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListContainersResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListContainersSegmentHandleResponse(resp) + }, + }) +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { + getPropertiesOptions := o.format() + resp, err := s.generated().GetProperties(ctx, getPropertiesOptions) + return resp, err +} + +// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. +// If an element (e.g. 
analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. +func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) { + properties, setPropertiesOptions := o.format() + resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions) + return resp, err +} + +// GetStatistics Retrieves statistics related to replication for the Blob service. +// It is only available when read-access geo-redundant replication is enabled for the storage account. +// With geo-redundant replication, Azure Storage maintains your data durable +// in two locations. In both locations, Azure Storage constantly maintains +// multiple healthy replicas of your data. The location where you read, +// create, update, or delete data is the primary storage account location. +// The primary location exists in the region you choose at the time you +// create an account via the Azure Management Azure classic portal, for +// example, North Central US. The location to which your data is replicated +// is the secondary location. The secondary location is automatically +// determined based on the location of the primary; it is in a second data +// center that resides in the same region as the primary location. Read-only +// access is available from the secondary location, if read-access geo-redundant +// replication is enabled for your storage account. +func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) { + getStatisticsOptions := o.format() + resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions) + + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +// This validity can be checked with CanGetAccountSASToken(). +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) { + if s.sharedKey() == nil { + return "", errors.New("SAS can only be signed with a SharedKeyCredential") + } + + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + Permissions: permissions.String(), + Services: services.String(), + ResourceTypes: resources.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} + +// FilterBlobs operation finds all blobs in the storage account whose tags match a given search expression. +// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +// To specify a container, eg. 
"@container=’containerName’ and Name = ‘C’" +func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + serviceFilterBlobsOptions := o.format() + resp, err := s.generated().FilterBlobs(ctx, serviceFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go new file mode 100644 index 000000000000..20665fc2b79c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// ListContainersIncludeType defines values for ListContainersIncludeType +type ListContainersIncludeType = generated.ListContainersIncludeType + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata + ListContainersIncludeTypeDeleted ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted + ListContainersIncludeTypeSystem ListContainersIncludeType = generated.ListContainersIncludeTypeSystem +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return generated.PossibleListContainersIncludeTypeValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. 
+func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return generated.PossibleBlobGeoReplicationStatusValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go new file mode 100644 index 000000000000..b0354cd90e3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -0,0 +1,220 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// KeyInfo contains KeyInfo struct. +type KeyInfo = generated.KeyInfo + +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method +type GetUserDelegationCredentialOptions struct { + // placeholder for future options +} + +func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions { + return nil +} + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// CpkInfo contains a group of parameters for the BlobClient.Download method. 
+type CpkInfo = generated.CpkInfo + +// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CpkScopeInfo = generated.CpkScopeInfo + +// CreateContainerOptions contains the optional parameters for the container.Client.Create method. +type CreateContainerOptions = container.CreateOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = container.DeleteOptions + +// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. +type RestoreContainerOptions = container.RestoreOptions + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule = generated.CorsRule + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy = generated.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics = generated.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = generated.Logging + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite = generated.StaticWebsite + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats = generated.StorageServiceStats + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListContainersOptions provides set of configurations for ListContainers operation +type ListContainersOptions struct { + Include ListContainersInclude + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing operation did not return all containers + // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. 
If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ListContainersInclude indicates what additional information the service should return with each container. +type ListContainersInclude struct { + // Tells the service whether to return metadata for each container. + Metadata bool + + // Tells the service whether to return soft-deleted containers. + Deleted bool +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + Cors []*CorsRule + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics + + // Azure Analytics Logging settings. + Logging *Logging + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite +} + +func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { + if o == nil { + return generated.StorageServiceProperties{}, nil + } + + return generated.StorageServiceProperties{ + Cors: o.Cors, + DefaultServiceVersion: o.DefaultServiceVersion, + DeleteRetentionPolicy: o.DeleteRetentionPolicy, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + StaticWebsite: o.StaticWebsite, + }, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetStatisticsOptions provides set of options for Client.GetStatistics +type GetStatisticsOptions struct { + // placeholder for future options +} + +func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FindBlobsByTags +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
+ Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only to return only blobs whose tags match the specified expression. + Where *string +} + +func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ServiceClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Where: o.Where, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go new file mode 100644 index 000000000000..788514cca372 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateContainerResponse contains the response from method container.Client.Create. +type CreateContainerResponse = generated.ContainerClientCreateResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = generated.ContainerClientDeleteResponse + +// RestoreContainerResponse contains the response from method container.Client.Restore +type RestoreContainerResponse = generated.ContainerClientRestoreResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse + +// ListContainersResponse contains the response from method Client.ListContainersSegment. +type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// GetStatisticsResponse contains the response from method Client.GetStatistics. +type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse + +// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. 
+type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json new file mode 100644 index 000000000000..b23e56abbb9c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json @@ -0,0 +1,513 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." + } + } + }, + "variables": { + "mgmtApiVersion": "2019-06-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": 
"[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + 
"supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "deleteRetentionPolicy": { + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + 
"AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), 
variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": 
"[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 1a9c8ab537f0..c90209a9484a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -177,7 +177,7 @@ func (t Token) WillExpireIn(d time.Duration) bool { return !t.Expires().After(time.Now().Add(d)) } -//OAuthToken return the current access token +// OAuthToken return the current access token func (t *Token) OAuthToken() string { return t.AccessToken } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE new file mode 100644 index 000000000000..3d8b93bc7987 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go new file mode 100644 index 000000000000..259ca6d56f4c --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package cache allows third parties to implement external storage for caching token data +for distributed systems or multiple local applications access. + +The data stored and extracted will represent the entire cache. Therefore it is recommended +one msal instance per user. This data is considered opaque and there are no guarantees to +implementers on the format being passed. +*/ +package cache + +// Marshaler marshals data from an internal cache to bytes that can be stored. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Serializer can serialize the cache to binary or from binary into the cache. +type Serializer interface { + Marshaler + Unmarshaler +} + +// ExportReplace is used export or replace what is in the cache. +type ExportReplace interface { + // Replace replaces the cache with what is in external storage. + // key is the suggested key which can be used for partioning the cache + Replace(cache Unmarshaler, key string) + // Export writes the binary representation of the cache (cache.Marshal()) to + // external storage. This is considered opaque. + // key is the suggested key which can be used for partioning the cache + Export(cache Marshaler, key string) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go new file mode 100644 index 000000000000..11a33de73a1e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -0,0 +1,510 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package confidential provides a client for authentication of "confidential" applications. +A "confidential" application is defined as an app that run on servers. 
They are considered +difficult to access and for that reason capable of keeping an application secret. +Confidential clients can hold configuration-time secrets. +*/ +package confidential + +import ( + "context" + "crypto" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +/* +Design note: + +confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. + +Duplicate Calls shared between public.Client and this package: +There is some duplicate call options provided here that are the same as in public.Client . This +is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s): +"a little copying is better than a little dependency". Yes, we could have another package with +shared options (fail). That divides like 2 options from all others which makes the user look +through more docs. We can have all clients in one package, but I think separate packages +here makes for better naming (public.Client vs client.PublicClient). So I chose a little +duplication. + +.Net People, Take note on X509: +This uses x509.Certificates and private keys. x509 does not store private keys. .Net +has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net +added, it doesn't exist in real life. Seriously, "x509.Certificate2", bahahahaha. As such I've +put a PEM decoder into here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be include in the package documentation. + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// CertFromPEM converts a PEM file (.pem or .key) for use with NewCredFromCert(). The file +// must contain the public certificate and the private key. If a PEM block is encrypted and +// password is not an empty string, it attempts to decrypt the PEM blocks using the password. +// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf. +func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) { + var certs []*x509.Certificate + var priv crypto.PrivateKey + for { + block, rest := pem.Decode(pemData) + if block == nil { + break + } + + //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase. 
+ if x509.IsEncryptedPEMBlock(block) { + b, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err) + } + block, _ = pem.Decode(b) + if block == nil { + return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode") + } + } + + switch block.Type { + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err) + } + certs = append(certs, cert) + case "PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + + var err error + priv, err = x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + case "RSA PRIVATE KEY": + if priv != nil { + return nil, nil, errors.New("found multiple private key blocks") + } + var err error + priv, err = x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf("could not decode private key: %v", err) + } + } + pemData = rest + } + + if len(certs) == 0 { + return nil, nil, fmt.Errorf("no certificates found") + } + + if priv == nil { + return nil, nil, fmt.Errorf("no private key found") + } + + return certs, priv, nil +} + +// AssertionRequestOptions has required information for client assertion claims +type AssertionRequestOptions = exported.AssertionRequestOptions + +// Credential represents the credential used in confidential client flows. +type Credential struct { + secret string + + cert *x509.Certificate + key crypto.PrivateKey + x5c []string + + assertionCallback func(context.Context, AssertionRequestOptions) (string, error) + + tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error) +} + +// toInternal returns the accesstokens.Credential that is used internally. The current structure of the +// code requires that client.go, requests.go and confidential.go share a credential type without +// having import recursion. That requires the type used between is in a shared package. Therefore +// we have this. +func (c Credential) toInternal() (*accesstokens.Credential, error) { + if c.secret != "" { + return &accesstokens.Credential{Secret: c.secret}, nil + } + if c.cert != nil { + if c.key == nil { + return nil, errors.New("missing private key for certificate") + } + return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil + } + if c.key != nil { + return nil, errors.New("missing certificate for private key") + } + if c.assertionCallback != nil { + return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil + } + if c.tokenProvider != nil { + return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil + } + return nil, errors.New("invalid credential") +} + +// NewCredFromSecret creates a Credential from a secret. +func NewCredFromSecret(secret string) (Credential, error) { + if secret == "" { + return Credential{}, errors.New("secret can't be empty string") + } + return Credential{secret: secret}, nil +} + +// NewCredFromAssertion creates a Credential from a signed assertion. +// +// Deprecated: a Credential created by this function can't refresh the +// assertion when it expires. Use NewCredFromAssertionCallback instead. 
+func NewCredFromAssertion(assertion string) (Credential, error) { + if assertion == "" { + return Credential{}, errors.New("assertion can't be empty string") + } + return NewCredFromAssertionCallback(func(context.Context, AssertionRequestOptions) (string, error) { return assertion, nil }), nil +} + +// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions +// authenticating the application. The callback must be thread safe. +func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential { + return Credential{assertionCallback: callback} +} + +// NewCredFromCert creates a Credential from an x509.Certificate and an RSA private key. +// CertFromPEM() can be used to get these values from a PEM file. +func NewCredFromCert(cert *x509.Certificate, key crypto.PrivateKey) Credential { + cred, _ := NewCredFromCertChain([]*x509.Certificate{cert}, key) + return cred +} + +// NewCredFromCertChain creates a Credential from a chain of x509.Certificates and an RSA private key +// as returned by CertFromPEM(). +func NewCredFromCertChain(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) { + cred := Credential{key: key} + k, ok := key.(*rsa.PrivateKey) + if !ok { + return cred, errors.New("key must be an RSA key") + } + for _, cert := range certs { + if cert == nil { + // not returning an error here because certs may still contain a sufficient cert/key pair + continue + } + certKey, ok := cert.PublicKey.(*rsa.PublicKey) + if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 { + // We know this is the signing cert because its public key matches the given private key. + // This cert must be first in x5c. + cred.cert = cert + cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...) + } else { + cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + if cred.cert == nil { + return cred, errors.New("key doesn't match any certificate") + } + return cred, nil +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters = exported.TokenProviderParameters + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult = exported.TokenProviderResult + +// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function +// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't +// useful to applications in general because the token provider must implement all authentication logic. +func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential { + return Credential{tokenProvider: provider} +} + +// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service. +func AutoDetectRegion() string { + return "TryAutoDetect" +} + +// Client is a representation of authentication client for confidential applications as defined in the +// package doc. A new Client should be created PER SERVICE USER. +// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications +type Client struct { + base base.Client + + cred *accesstokens.Credential + + // userID is some unique identifier for a user. It actually isn't used by us at all, it + // simply acts as another hint that a confidential.Client is for a single user. 
+ userID string +} + +// Options are optional settings for New(). These options are set using various functions +// returning Option calls. +type Options struct { + // Accessor controls cache persistence. + // By default there is no cache persistence. This can be set using the WithAccessor() option. + Accessor cache.ExportReplace + + // The host of the Azure Active Directory authority. + // The default is https://login.microsoftonline.com/common. This can be changed using the + // WithAuthority() option. + Authority string + + // The HTTP client used for making requests. + // It defaults to a shared http.Client. + HTTPClient ops.HTTPClient + + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + + // Instructs MSAL Go to use an Azure regional token service with sepcified AzureRegion. + AzureRegion string +} + +func (o Options) validate() error { + u, err := url.Parse(o.Authority) + if err != nil { + return fmt.Errorf("the Authority(%s) does not parse as a valid URL", o.Authority) + } + if u.Scheme != "https" { + return fmt.Errorf("the Authority(%s) does not appear to use https", o.Authority) + } + return nil +} + +// Option is an optional argument to New(). +type Option func(o *Options) + +// WithAuthority allows you to provide a custom authority for use in the client. +func WithAuthority(authority string) Option { + return func(o *Options) { + o.Authority = authority + } +} + +// WithAccessor provides a cache accessor that will read and write to some externally managed cache +// that may or may not be shared with other applications. +func WithAccessor(accessor cache.ExportReplace) Option { + return func(o *Options) { + o.Accessor = accessor + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *Options) { + o.HTTPClient = httpClient + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C() Option { + return func(o *Options) { + o.SendX5C = true + } +} + +// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region. +// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/. +// See https://aka.ms/region-map for more details on region names. +// The region value should be short region name for the region where the service is deployed. +// For example "centralus" is short name for region Central US. +// Not all auth flows can use the regional token service. +// Service To Service (client credential flow) tokens can be obtained from the regional service. +// Requires configuration at the tenant level. +// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions). +// If auto-detection fails, the non-regional endpoint will be used. +// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. +func WithAzureRegion(val string) Option { + return func(o *Options) { + o.AzureRegion = val + } +} + +// New is the constructor for Client. userID is the unique identifier of the user this client +// will store credentials for (a Client is per user). clientID is the Azure clientID and cred is +// the type of credential to use. 
+func New(clientID string, cred Credential, options ...Option) (Client, error) { + internalCred, err := cred.toInternal() + if err != nil { + return Client{}, err + } + + opts := Options{ + Authority: base.AuthorityPublicCloud, + HTTPClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + baseOpts := []base.Option{ + base.WithCacheAccessor(opts.Accessor), + base.WithRegionDetection(opts.AzureRegion), + base.WithX5C(opts.SendX5C), + } + if cred.tokenProvider != nil { + // The caller will handle all details of authentication, using Client only as a token cache. + // Declaring the authority host known prevents unnecessary metadata discovery requests. (The + // authority is irrelevant to Client and friends because the token provider is responsible + // for authentication.) + parsed, err := url.Parse(opts.Authority) + if err != nil { + return Client{}, errors.New("invalid authority") + } + baseOpts = append(baseOpts, base.WithKnownAuthorityHosts([]string{parsed.Hostname()})) + } + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), baseOpts...) + if err != nil { + return Client{}, err + } + + return Client{base: base, cred: internalCred}, nil +} + +// UserID is the unique user identifier this client if for. +func (cca Client) UserID() string { + return cca.userID +} + +// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in. +func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) { + return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, cca.base.AuthParams) +} + +// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type AcquireTokenSilentOptions struct { + // Account represents the account to use. To set, use the WithSilentAccount() option. + Account Account +} + +// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent(). +type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions) + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) AcquireTokenSilentOption { + return func(a *AcquireTokenSilentOptions) { + a.Account = account + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. +func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) { + opts := AcquireTokenSilentOptions{} + for _, o := range options { + o(&opts) + } + var isAppCache bool + if opts.Account.IsZero() { + isAppCache = true + } + + silentParameters := base.AcquireTokenSilentParameters{ + Scopes: scopes, + Account: opts.Account, + RequestType: accesstokens.ATConfidential, + Credential: cca.cred, + IsAppCache: isAppCache, + } + + return cca.base.AcquireTokenSilent(ctx, silentParameters) +} + +// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow. +type AcquireTokenByAuthCodeOptions struct { + Challenge string +} + +// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode(). 
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions) + +// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call. +func WithChallenge(challenge string) AcquireTokenByAuthCodeOption { + return func(a *AcquireTokenByAuthCodeOptions) { + a.Challenge = challenge + } +} + +// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code. +// The specified redirect URI must be the same URI that was used when the authorization code was requested. +func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) { + opts := AcquireTokenByAuthCodeOptions{} + for _, o := range options { + o(&opts) + } + + params := base.AcquireTokenAuthCodeParameters{ + Scopes: scopes, + Code: code, + Challenge: opts.Challenge, + AppType: accesstokens.ATConfidential, + Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode + RedirectURI: redirectURI, + } + + return cca.base.AcquireTokenByAuthCode(ctx, params) +} + +// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant. +func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string) (AuthResult, error) { + authParams := cca.base.AuthParams + authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATClientCredentials + + token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) + if err != nil { + return AuthResult{}, err + } + return cca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow. +func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string) (AuthResult, error) { + params := base.AcquireTokenOnBehalfOfParameters{ + Scopes: scopes, + UserAssertion: userAssertion, + Credential: cca.cred, + } + return cca.base.AcquireTokenOnBehalfOf(ctx, params) +} + +// Account gets the account in the token cache with the specified homeAccountID. +func (cca Client) Account(homeAccountID string) Account { + return cca.base.Account(homeAccountID) +} + +// RemoveAccount signs the account out and forgets account from token cache. +func (cca Client) RemoveAccount(account Account) error { + cca.base.RemoveAccount(account) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md new file mode 100644 index 000000000000..34a699f48018 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md @@ -0,0 +1,111 @@ +# MSAL Error Design + +Author: Abhidnya Patil(abhidnya.patil@microsoft.com) + +Contributors: + +- John Doak(jdoak@microsoft.com) +- Keegan Caruso(Keegan.Caruso@microsoft.com) +- Joel Hendrix(jhendrix@microsoft.com) + +## Background + +Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users. + +### Go error handling vs other MSAL languages + +Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program. 
+ +Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do. + +### Go custom error types + +Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error". + +Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface: + +```go +type MyCustomErr struct { + Msg string +} +func (m MyCustomErr) Error() string { // This implements "error" + return m.Msg +} +``` + +### MSAL Error Goals + +- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations +- Detect errors that are transitory and can be retried +- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment + +## Implementing Client Side Errors + +Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible. + +These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was. + +## Implementing Service Side Errors + +Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error. + +These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors. + +The current implementation includes a specialized type that captures any error from the server: + +```go +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} +``` + +A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package: + +```go +var callErr CallErr +if errors.As(err, &callErr) { + ... +} +``` + +We provide a Verbose() function that can retrieve the most verbose message from any error we provide: + +```go +fmt.Println(errors.Verbose(err)) +``` + +If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors). + +CallErr is always thrown from the comm package (which handles all http requests) and looks similar to: + +```go +return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response + } +``` + +## Future Decisions + +The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it. 
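As a rough illustration of the caller-side option, the sketch below shows how an application could classify a returned error as retryable today using only the `CallErr` type and the `As` helper shown above; the `shouldRetry` function name and its status-code policy are illustrative assumptions, not part of the MSAL API.

```go
package retry

import (
	"net/http"

	msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
)

// shouldRetry is a hypothetical caller-side helper. It reports whether err wraps a
// server-side CallErr whose HTTP status code suggests a transient failure
// (throttling or a 5xx), in which case the caller may choose to retry.
func shouldRetry(err error) bool {
	var callErr msalerrors.CallErr
	if !msalerrors.As(err, &callErr) {
		// Client-side errors (bad arguments, misconfiguration) are not retryable.
		return false
	}
	if callErr.Resp == nil {
		return false
	}
	code := callErr.Resp.StatusCode
	return code == http.StatusTooManyRequests || (code >= 500 && code <= 599)
}
```

Whether that policy lives in the application or inside the library is exactly the open question here.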
+ +If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait. + +Otherwise we will do this internally and retries will be left to us. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go new file mode 100644 index 000000000000..c9b8dbed088d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package errors + +import ( + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strings" + + "github.com/kylelemons/godebug/pretty" +) + +var prettyConf = &pretty.Config{ + IncludeUnexported: false, + SkipZeroFields: true, + TrackCycles: true, + Formatter: map[reflect.Type]interface{}{ + reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string { + b, err := io.ReadAll(r) + if err != nil { + return "could not read io.Reader content" + } + return string(b) + }, + }, +} + +type verboser interface { + Verbose() string +} + +// Verbose prints the most verbose error that the error message has. +func Verbose(err error) string { + build := strings.Builder{} + for { + if err == nil { + break + } + if v, ok := err.(verboser); ok { + build.WriteString(v.Verbose()) + } else { + build.WriteString(err.Error()) + } + err = errors.Unwrap(err) + } + return build.String() +} + +// New is equivalent to errors.New(). +func New(text string) error { + return errors.New(text) +} + +// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the +// http.Request and Response objects. Implements error. +type CallErr struct { + Req *http.Request + // Resp contains response body + Resp *http.Response + Err error +} + +// Errors implements error.Error(). +func (e CallErr) Error() string { + return e.Err.Error() +} + +// Verbose prints a versbose error message with the request or response. +func (e CallErr) Verbose() string { + e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need + e.Resp.TLS = nil // Same + return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp)) +} + +// Is reports whether any error in errors chain matches target. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As finds the first error in errors chain that matches target, +// and if so, sets target to that error value and returns true. +// Otherwise, it returns false. +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go new file mode 100644 index 000000000000..a86f06400ba3 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -0,0 +1,406 @@ +// Package base contains a "Base" client that is used by the external public.Client and confidential.Client. +// Base holds shared attributes that must be available to both clients and methods that act as +// shared calls. 
+package base + +import ( + "context" + "errors" + "fmt" + "net/url" + "reflect" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +const ( + // AuthorityPublicCloud is the default AAD authority host + AuthorityPublicCloud = "https://login.microsoftonline.com/common" + scopeSeparator = " " +) + +// manager provides an internal cache. It is defined to allow faking the cache in tests. +// In all production use it is a *storage.Manager. +type manager interface { + Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (storage.TokenResponse, error) + Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) + AllAccounts() []shared.Account + Account(homeAccountID string) shared.Account + RemoveAccount(account shared.Account, clientID string) +} + +// partitionedManager provides an internal cache. It is defined to allow faking the cache in tests. +// In all production use it is a *storage.PartitionedManager. +type partitionedManager interface { + Read(ctx context.Context, authParameters authority.AuthParams) (storage.TokenResponse, error) + Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) +} + +type noopCacheAccessor struct{} + +func (n noopCacheAccessor) Replace(cache cache.Unmarshaler, key string) {} +func (n noopCacheAccessor) Export(cache cache.Marshaler, key string) {} + +// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache). +type AcquireTokenSilentParameters struct { + Scopes []string + Account shared.Account + RequestType accesstokens.AppType + Credential *accesstokens.Credential + IsAppCache bool + UserAssertion string + AuthorizationType authority.AuthorizeType +} + +// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. +// To use PKCE, set the CodeChallengeParameter. +// Code challenges are used to secure authorization code grants; for more information, visit +// https://tools.ietf.org/html/rfc7636. +type AcquireTokenAuthCodeParameters struct { + Scopes []string + Code string + Challenge string + RedirectURI string + AppType accesstokens.AppType + Credential *accesstokens.Credential +} + +type AcquireTokenOnBehalfOfParameters struct { + Scopes []string + Credential *accesstokens.Credential + UserAssertion string +} + +// AuthResult contains the results of one token acquisition operation in PublicClientApplication +// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult +type AuthResult struct { + Account shared.Account + IDToken accesstokens.IDToken + AccessToken string + ExpiresOn time.Time + GrantedScopes []string + DeclinedScopes []string +} + +// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). 
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { + if err := storageTokenResponse.AccessToken.Validate(); err != nil { + return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err) + } + + account := storageTokenResponse.Account + accessToken := storageTokenResponse.AccessToken.Secret + grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator) + + // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications. + var idToken accesstokens.IDToken + if !storageTokenResponse.IDToken.IsZero() { + err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret)) + if err != nil { + return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) + } + } + return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil +} + +// NewAuthResult creates an AuthResult. +func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) { + if len(tokenResponse.DeclinedScopes) > 0 { + return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ",")) + } + return AuthResult{ + Account: account, + IDToken: tokenResponse.IDToken, + AccessToken: tokenResponse.AccessToken, + ExpiresOn: tokenResponse.ExpiresOn.T, + GrantedScopes: tokenResponse.GrantedScopes.Slice, + }, nil +} + +// Client is a base client that provides access to common methods and primatives that +// can be used by multiple clients. +type Client struct { + Token *oauth.Client + manager manager // *storage.Manager or fakeManager in tests + pmanager partitionedManager // *storage.PartitionedManager or fakeManager in tests + + AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New(). + cacheAccessor cache.ExportReplace +} + +// Option is an optional argument to the New constructor. +type Option func(c *Client) + +// WithCacheAccessor allows you to set some type of cache for storing authentication tokens. +func WithCacheAccessor(ca cache.ExportReplace) Option { + return func(c *Client) { + if ca != nil { + c.cacheAccessor = ca + } + } +} + +// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user +func WithKnownAuthorityHosts(hosts []string) Option { + return func(c *Client) { + cp := make([]string, len(hosts)) + copy(cp, hosts) + c.AuthParams.KnownAuthorityHosts = cp + } +} + +// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication. +func WithX5C(sendX5C bool) Option { + return func(c *Client) { + c.AuthParams.SendX5C = sendX5C + } +} + +func WithRegionDetection(region string) Option { + return func(c *Client) { + c.AuthParams.AuthorityInfo.Region = region + } +} + +// New is the constructor for Base. +func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) { + authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true) + if err != nil { + return Client{}, err + } + authParams := authority.NewAuthParams(clientID, authInfo) + client := Client{ // Note: Hey, don't even THINK about making Base into *Base. 
See "design notes" in public.go and confidential.go + Token: token, + AuthParams: authParams, + cacheAccessor: noopCacheAccessor{}, + manager: storage.New(token), + pmanager: storage.NewPartitionedManager(token), + } + for _, o := range options { + o(&client) + } + return client, nil + +} + +// AuthCodeURL creates a URL used to acquire an authorization code. +func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) { + endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "") + if err != nil { + return "", err + } + + baseURL, err := url.Parse(endpoints.AuthorizationEndpoint) + if err != nil { + return "", err + } + + v := url.Values{} + v.Add("client_id", clientID) + v.Add("response_type", "code") + v.Add("redirect_uri", redirectURI) + v.Add("scope", strings.Join(scopes, scopeSeparator)) + if authParams.State != "" { + v.Add("state", authParams.State) + } + if authParams.CodeChallenge != "" { + v.Add("code_challenge", authParams.CodeChallenge) + } + if authParams.CodeChallengeMethod != "" { + v.Add("code_challenge_method", authParams.CodeChallengeMethod) + } + if authParams.Prompt != "" { + v.Add("prompt", authParams.Prompt) + } + // There were left over from an implementation that didn't use any of these. We may + // need to add them later, but as of now aren't needed. + /* + if p.ResponseMode != "" { + urlParams.Add("response_mode", p.ResponseMode) + } + if p.LoginHint != "" { + urlParams.Add("login_hint", p.LoginHint) + } + if p.DomainHint != "" { + urlParams.Add("domain_hint", p.DomainHint) + } + */ + baseURL.RawQuery = v.Encode() + return baseURL.String(), nil +} + +func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) { + authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and authParams is not a pointer. 
+ authParams.Scopes = silent.Scopes + authParams.HomeAccountID = silent.Account.HomeAccountID + authParams.AuthorizationType = silent.AuthorizationType + authParams.UserAssertion = silent.UserAssertion + + var storageTokenResponse storage.TokenResponse + var err error + if authParams.AuthorizationType == authority.ATOnBehalfOf { + if s, ok := b.pmanager.(cache.Serializer); ok { + suggestedCacheKey := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + storageTokenResponse, err = b.pmanager.Read(ctx, authParams) + if err != nil { + return AuthResult{}, err + } + } else { + if s, ok := b.manager.(cache.Serializer); ok { + suggestedCacheKey := authParams.CacheKey(silent.IsAppCache) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + authParams.AuthorizationType = authority.ATRefreshToken + storageTokenResponse, err = b.manager.Read(ctx, authParams, silent.Account) + if err != nil { + return AuthResult{}, err + } + } + + result, err := AuthResultFromStorage(storageTokenResponse) + if err != nil { + if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() { + return AuthResult{}, errors.New("no token found") + } + + var cc *accesstokens.Credential + if silent.RequestType == accesstokens.ATConfidential { + cc = silent.Credential + } + + token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) + } + return result, nil +} + +func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) { + authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. + authParams.Scopes = authCodeParams.Scopes + authParams.Redirecturi = authCodeParams.RedirectURI + authParams.AuthorizationType = authority.ATAuthCode + + var cc *accesstokens.Credential + if authCodeParams.AppType == accesstokens.ATConfidential { + cc = authCodeParams.Credential + authParams.IsConfidentialClient = true + } + + req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge) + if err != nil { + return AuthResult{}, err + } + + token, err := b.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return b.AuthResultFromToken(ctx, authParams, token, true) +} + +// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token. +func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) { + authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. 
+ authParams.Scopes = onBehalfOfParams.Scopes + authParams.AuthorizationType = authority.ATOnBehalfOf + authParams.UserAssertion = onBehalfOfParams.UserAssertion + + silentParameters := AcquireTokenSilentParameters{ + Scopes: onBehalfOfParams.Scopes, + RequestType: accesstokens.ATConfidential, + Credential: onBehalfOfParams.Credential, + UserAssertion: onBehalfOfParams.UserAssertion, + AuthorizationType: authority.ATOnBehalfOf, + } + token, err := b.AcquireTokenSilent(ctx, silentParameters) + if err != nil { + fmt.Println("Acquire Token Silent failed ") + token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential) + if err != nil { + return AuthResult{}, err + } + return b.AuthResultFromToken(ctx, authParams, token, true) + } + return token, err +} + +func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) { + if !cacheWrite { + return NewAuthResult(token, shared.Account{}) + } + + var account shared.Account + var err error + if authParams.AuthorizationType == authority.ATOnBehalfOf { + if s, ok := b.pmanager.(cache.Serializer); ok { + suggestedCacheKey := token.CacheKey(authParams) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + account, err = b.pmanager.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + } else { + if s, ok := b.manager.(cache.Serializer); ok { + suggestedCacheKey := token.CacheKey(authParams) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + account, err = b.manager.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + } + return NewAuthResult(token, account) +} + +func (b Client) AllAccounts() []shared.Account { + if s, ok := b.manager.(cache.Serializer); ok { + suggestedCacheKey := b.AuthParams.CacheKey(false) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + + accounts := b.manager.AllAccounts() + return accounts +} + +func (b Client) Account(homeAccountID string) shared.Account { + authParams := b.AuthParams // This is a copy, as we dont' have a pointer receiver and .AuthParams is not a pointer. + authParams.AuthorizationType = authority.AccountByID + authParams.HomeAccountID = homeAccountID + if s, ok := b.manager.(cache.Serializer); ok { + suggestedCacheKey := b.AuthParams.CacheKey(false) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + account := b.manager.Account(homeAccountID) + return account +} + +// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account. 
+func (b Client) RemoveAccount(account shared.Account) { + if s, ok := b.manager.(cache.Serializer); ok { + suggestedCacheKey := b.AuthParams.CacheKey(false) + b.cacheAccessor.Replace(s, suggestedCacheKey) + defer b.cacheAccessor.Export(s, suggestedCacheKey) + } + b.manager.RemoveAccount(account, b.AuthParams.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go new file mode 100644 index 000000000000..548c2faebf96 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -0,0 +1,200 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type Contract struct { + AccessTokens map[string]AccessToken `json:"AccessToken,omitempty"` + RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"` + IDTokens map[string]IDToken `json:"IdToken,omitempty"` + Accounts map[string]shared.Account `json:"Account,omitempty"` + AppMetaData map[string]AppMetaData `json:"AppMetadata,omitempty"` + + AdditionalFields map[string]interface{} +} + +// Contract is the JSON structure that is written to any storage medium when serializing +// the internal cache. This design is shared between MSAL versions in many languages. +// This cannot be changed without design that includes other SDKs. +type InMemoryContract struct { + AccessTokensPartition map[string]map[string]AccessToken + RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken + IDTokensPartition map[string]map[string]IDToken + AccountsPartition map[string]map[string]shared.Account + AppMetaData map[string]AppMetaData +} + +// NewContract is the constructor for Contract. +func NewInMemoryContract() *InMemoryContract { + return &InMemoryContract{ + AccessTokensPartition: map[string]map[string]AccessToken{}, + RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{}, + IDTokensPartition: map[string]map[string]IDToken{}, + AccountsPartition: map[string]map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + } +} + +// NewContract is the constructor for Contract. +func NewContract() *Contract { + return &Contract{ + AccessTokens: map[string]AccessToken{}, + RefreshTokens: map[string]accesstokens.RefreshToken{}, + IDTokens: map[string]IDToken{}, + Accounts: map[string]shared.Account{}, + AppMetaData: map[string]AppMetaData{}, + AdditionalFields: map[string]interface{}{}, + } +} + +// AccessToken is the JSON representation of a MSAL access token for encoding to storage. 
+type AccessToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + Scopes string `json:"target,omitempty"` + ExpiresOn internalTime.Unix `json:"expires_on,omitempty"` + ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` + CachedAt internalTime.Unix `json:"cached_at,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccessToken is the constructor for AccessToken. +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { + return AccessToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "AccessToken", + ClientID: clientID, + Secret: token, + Scopes: scopes, + CachedAt: internalTime.Unix{T: cachedAt.UTC()}, + ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, + ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AccessToken) Key() string { + return strings.Join( + []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + shared.CacheKeySeparator, + ) +} + +// FakeValidate enables tests to fake access token validation +var FakeValidate func(AccessToken) error + +// Validate validates that this AccessToken can be used. +func (a AccessToken) Validate() error { + if FakeValidate != nil { + return FakeValidate(a) + } + if a.CachedAt.T.After(time.Now()) { + return errors.New("access token isn't valid, it was cached at a future time") + } + if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) { + return fmt.Errorf("access token is expired") + } + if a.CachedAt.T.IsZero() { + return fmt.Errorf("access token does not have CachedAt set") + } + return nil +} + +// IDToken is the JSON representation of an MSAL id token for encoding to storage. +type IDToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + Secret string `json:"secret,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + AdditionalFields map[string]interface{} +} + +// IsZero determines if IDToken is the zero value. +func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// NewIDToken is the constructor for IDToken. +func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { + return IDToken{ + HomeAccountID: homeID, + Environment: env, + Realm: realm, + CredentialType: "IDToken", + ClientID: clientID, + Secret: idToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. 
+func (id IDToken) Key() string { + return strings.Join( + []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, + shared.CacheKeySeparator, + ) +} + +// AppMetaData is the JSON representation of application metadata for encoding to storage. +type AppMetaData struct { + FamilyID string `json:"family_id,omitempty"` + ClientID string `json:"client_id,omitempty"` + Environment string `json:"environment,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAppMetaData is the constructor for AppMetaData. +func NewAppMetaData(familyID, clientID, environment string) AppMetaData { + return AppMetaData{ + FamilyID: familyID, + ClientID: clientID, + Environment: environment, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. +func (a AppMetaData) Key() string { + return strings.Join( + []string{"AppMetaData", a.Environment, a.ClientID}, + shared.CacheKeySeparator, + ) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go new file mode 100644 index 000000000000..d17e7c034a41 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -0,0 +1,430 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data. +type PartitionedManager struct { + contract *InMemoryContract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// NewPartitionedManager is the constructor for PartitionedManager. +func NewPartitionedManager(requests *oauth.Client) *PartitionedManager { + m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewInMemoryContract() + return m +} + +// Read reads a storage token from the cache if it exists. 
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + userAssertionHash := authParameters.AssertionHash() + partitionKeyFromRequest := userAssertionHash + + accessToken, err := m.readAccessToken(metadata.Aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + if err != nil { + return TokenResponse{}, err + } + + AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID) + if err != nil { + return TokenResponse{}, err + } + familyID := AppMetaData.FamilyID + + refreshToken, err := m.readRefreshToken(metadata.Aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest) + if err != nil { + return TokenResponse{}, err + } + + idToken, err := m.readIDToken(metadata.Aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken)) + if err != nil { + return TokenResponse{}, err + } + + account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID) + if err != nil { + return TokenResponse{}, err + } + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: refreshToken, + IDToken: idToken, + Account: account, + }, nil +} + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + userAssertionHash := authParameters.AssertionHash() + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + refreshToken.UserAssertionHash = userAssertionHash + } + if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + accessToken.UserAssertionHash = userAssertionHash // get Hash method on this + } + + // Since we have a valid access token, cache it before moving on. 
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil { + return account, err + } + } else { + return shared.Account{}, err + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + idToken.UserAssertionHash = userAssertionHash + } + if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + idTokenJwt.PreferredUsername, + ) + if authParameters.AuthorizationType == authority.ATOnBehalfOf { + account.UserAssertionHash = userAssertionHash + } + if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range accessTokens { + if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } + } + } + } + } + return AccessToken{}, fmt.Errorf("access token not found") +} + +func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken) + } + m.contract.AccessTokensPartition[partitionKey][key] = accessToken + return nil +} + +func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool { + return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokensPartition[partitionKey] { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := refreshToken.Key() + if m.contract.AccessTokensPartition[partitionKey] == nil { + m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken) + } + m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken + return nil +} + +func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokensPartition[partitionKey] { + if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.IDTokensPartition[partitionKey] == nil { + m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken) + } + m.contract.IDTokensPartition[partitionKey][key] = idToken + return nil +} + +func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.AccountsPartition[partitionKey] { + if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + if m.contract.AccountsPartition[partitionKey] == nil { + m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account) + } + m.contract.AccountsPartition[partitionKey][key] = account + return nil +} + +func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *PartitionedManager) update(cache *InMemoryContract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. +func (m *PartitionedManager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *PartitionedManager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewInMemoryContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} + +func getPartitionKeyAccessToken(item AccessToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string { + if item.UserAssertionHash != "" { + return item.UserAssertionHash + } + return item.HomeAccountID +} + +func getPartitionKeyIDToken(item IDToken) string { + return item.HomeAccountID +} + +func getPartitionKeyAccount(item shared.Account) string { + return item.HomeAccountID +} + +func getPartitionKeyIDTokenRead(item AccessToken) string { + return item.HomeAccountID +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go new file mode 100644 index 000000000000..b759408b5b4a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -0,0 +1,517 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package storage holds all cached token information for MSAL. This storage can be +// augmented with third-party extensions to provide persistent storage. In that case, +// reads and writes in upper packages will call Marshal() to take the entire in-memory +// representation and write it to storage and Unmarshal() to update the entire in-memory +// storage with what was in the persistent storage. 
The persistent storage can only be +// accessed in this way because multiple MSAL clients written in multiple languages can +// access the same storage and must adhere to the same method that was defined +// previously. +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// aadInstanceDiscoveryer allows faking in tests. +// It is implemented in production by ops/authority.Client +type aadInstanceDiscoveryer interface { + AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// TokenResponse mimics a token response that was pulled from the cache. +type TokenResponse struct { + RefreshToken accesstokens.RefreshToken + IDToken IDToken // *Credential + AccessToken AccessToken + Account shared.Account +} + +// Manager is an in-memory cache of access tokens, accounts and meta data. This data is +// updated on read/write calls. Unmarshal() replaces all data stored here with whatever +// was given to it on each call. +type Manager struct { + contract *Contract + contractMu sync.RWMutex + requests aadInstanceDiscoveryer // *oauth.Token + + aadCacheMu sync.RWMutex + aadCache map[string]authority.InstanceDiscoveryMetadata +} + +// New is the constructor for Manager. +func New(requests *oauth.Client) *Manager { + m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)} + m.contract = NewContract() + return m +} + +func checkAlias(alias string, aliases []string) bool { + for _, v := range aliases { + if alias == v { + return true + } + } + return false +} + +func isMatchingScopes(scopesOne []string, scopesTwo string) bool { + newScopesTwo := strings.Split(scopesTwo, scopeSeparator) + scopeCounter := 0 + for _, scope := range scopesOne { + for _, otherScope := range newScopesTwo { + if strings.EqualFold(scope, otherScope) { + scopeCounter++ + continue + } + } + } + return scopeCounter == len(scopesOne) +} + +// Read reads a storage token from the cache if it exists. 
+func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) { + homeAccountID := authParameters.HomeAccountID + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + scopes := authParameters.Scopes + + // fetch metadata if and only if the authority isn't explicitly trusted + aliases := authParameters.KnownAuthorityHosts + if len(aliases) == 0 { + metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo) + if err != nil { + return TokenResponse{}, err + } + aliases = metadata.Aliases + } + + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + + if account.IsZero() { + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: accesstokens.RefreshToken{}, + IDToken: IDToken{}, + Account: shared.Account{}, + }, nil + } + idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID) + if err != nil { + return TokenResponse{}, err + } + + AppMetaData, err := m.readAppMetaData(aliases, clientID) + if err != nil { + return TokenResponse{}, err + } + familyID := AppMetaData.FamilyID + + refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID) + if err != nil { + return TokenResponse{}, err + } + account, err = m.readAccount(homeAccountID, aliases, realm) + if err != nil { + return TokenResponse{}, err + } + return TokenResponse{ + AccessToken: accessToken, + RefreshToken: refreshToken, + IDToken: idToken, + Account: account, + }, nil +} + +const scopeSeparator = " " + +// Write writes a token response to the cache and returns the account information the token is stored with. +func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { + authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + homeAccountID := authParameters.HomeAccountID + environment := authParameters.AuthorityInfo.Host + realm := authParameters.AuthorityInfo.Tenant + clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) + + cachedAt := time.Now() + + var account shared.Account + + if len(tokenResponse.RefreshToken) > 0 { + refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID) + if err := m.writeRefreshToken(refreshToken); err != nil { + return account, err + } + } + + if len(tokenResponse.AccessToken) > 0 { + accessToken := NewAccessToken( + homeAccountID, + environment, + realm, + clientID, + cachedAt, + tokenResponse.ExpiresOn.T, + tokenResponse.ExtExpiresOn.T, + target, + tokenResponse.AccessToken, + ) + + // Since we have a valid access token, cache it before moving on. 
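+ // If validation fails, the token is simply skipped (not cached) rather than returned as an error.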
+ if err := accessToken.Validate(); err == nil { + if err := m.writeAccessToken(accessToken); err != nil { + return account, err + } + } + } + + idTokenJwt := tokenResponse.IDToken + if !idTokenJwt.IsZero() { + idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken) + if err := m.writeIDToken(idToken); err != nil { + return shared.Account{}, err + } + + localAccountID := idTokenJwt.LocalAccountID() + authorityType := authParameters.AuthorityInfo.AuthorityType + + account = shared.NewAccount( + homeAccountID, + environment, + realm, + localAccountID, + authorityType, + idTokenJwt.PreferredUsername, + ) + if err := m.writeAccount(account); err != nil { + return shared.Account{}, err + } + } + + AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment) + + if err := m.writeAppMetaData(AppMetaData); err != nil { + return shared.Account{}, err + } + return account, nil +} + +func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + md, err := m.aadMetadataFromCache(ctx, authorityInfo) + if err != nil { + // not in the cache, retrieve it + md, err = m.aadMetadata(ctx, authorityInfo) + } + return md, err +} + +func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.RLock() + defer m.aadCacheMu.RUnlock() + metadata, ok := m.aadCache[authorityInfo.Host] + if ok { + return metadata, nil + } + return metadata, errors.New("not found") +} + +func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + m.aadCacheMu.Lock() + defer m.aadCacheMu.Unlock() + discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return authority.InstanceDiscoveryMetadata{}, err + } + + for _, metadataEntry := range discoveryResponse.Metadata { + for _, aliasedAuthority := range metadataEntry.Aliases { + m.aadCache[aliasedAuthority] = metadataEntry + } + } + if _, ok := m.aadCache[authorityInfo.Host]; !ok { + m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{ + PreferredNetwork: authorityInfo.Host, + PreferredCache: authorityInfo.Host, + } + } + return m.aadCache[authorityInfo.Host], nil +} + +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. + // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't + // an issue, however if it does become a problem then we know where to look. 
+ for _, at := range m.contract.AccessTokens { + if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at + } + } + } + } + return AccessToken{} +} + +func (m *Manager) writeAccessToken(accessToken AccessToken) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + key := accessToken.Key() + m.contract.AccessTokens[key] = accessToken + return nil +} + +func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) { + byFamily := func(rt accesstokens.RefreshToken) bool { + return matchFamilyRefreshToken(rt, homeID, envAliases) + } + byClient := func(rt accesstokens.RefreshToken) bool { + return matchClientIDRefreshToken(rt, homeID, envAliases, clientID) + } + + var matchers []func(rt accesstokens.RefreshToken) bool + if familyID == "" { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byClient, byFamily, + } + } else { + matchers = []func(rt accesstokens.RefreshToken) bool{ + byFamily, byClient, + } + } + + // TODO(keegan): All the tests here pass, but Bogdan says this is + // more complicated. I'm opening an issue for this to have him + // review the tests and suggest tests that would break this so + // we can re-write against good tests. His comments as follow: + // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is. + // The algorithm is: + // If application is NOT part of the family, search by client_ID + // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). 
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, matcher := range matchers { + for _, rt := range m.contract.RefreshTokens { + if matcher(rt) { + return rt, nil + } + } + } + + return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") +} + +func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != "" +} + +func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool { + return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID +} + +func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error { + key := refreshToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.RefreshTokens[key] = refreshToken + return nil +} + +func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + for _, idt := range m.contract.IDTokens { + if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { + if checkAlias(idt.Environment, envAliases) { + return idt, nil + } + } + } + return IDToken{}, fmt.Errorf("token not found") +} + +func (m *Manager) writeIDToken(idToken IDToken) error { + key := idToken.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.IDTokens[key] = idToken + return nil +} + +func (m *Manager) AllAccounts() []shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + var accounts []shared.Account + for _, v := range m.contract.Accounts { + accounts = append(accounts, v) + } + + return accounts +} + +func (m *Manager) Account(homeAccountID string) shared.Account { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, v := range m.contract.Accounts { + if v.HomeAccountID == homeAccountID { + return v + } + } + + return shared.Account{} +} + +func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. + // We only use a map because the storage contract shared between all language implementations says use a map. + // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing + // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup + // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored + // is really low (say 2). Each hash is more expensive than the entire iteration. 
+ for _, acc := range m.contract.Accounts { + if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + return acc, nil + } + } + return shared.Account{}, fmt.Errorf("account not found") +} + +func (m *Manager) writeAccount(account shared.Account) error { + key := account.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.Accounts[key] = account + return nil +} + +func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { + m.contractMu.RLock() + defer m.contractMu.RUnlock() + + for _, app := range m.contract.AppMetaData { + if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + return app, nil + } + } + return AppMetaData{}, fmt.Errorf("not found") +} + +func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error { + key := AppMetaData.Key() + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract.AppMetaData[key] = AppMetaData + return nil +} + +// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account. +func (m *Manager) RemoveAccount(account shared.Account, clientID string) { + m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID) + m.removeAccessTokens(account.HomeAccountID, account.Environment) + m.removeIDTokens(account.HomeAccountID, account.Environment) + m.removeAccounts(account.HomeAccountID, account.Environment) +} + +func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, rt := range m.contract.RefreshTokens { + // Check for RTs associated with the account. + if rt.HomeAccountID == homeID && rt.Environment == env { + // Do RT's app ownership check as a precaution, in case family apps + // and 3rd-party apps share same token cache, although they should not. + if rt.ClientID == clientID || rt.FamilyID != "" { + delete(m.contract.RefreshTokens, key) + } + } + } +} + +func (m *Manager) removeAccessTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, at := range m.contract.AccessTokens { + // Remove AT's associated with the account + if at.HomeAccountID == homeID && at.Environment == env { + // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check. + // It means ATs for other apps will also be removed, it is OK because: + // non-family apps are not supposed to share token cache to begin with; + // Even if it happens, we keep other app's RT already, so SSO still works. + delete(m.contract.AccessTokens, key) + } + } +} + +func (m *Manager) removeIDTokens(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, idt := range m.contract.IDTokens { + // Remove ID tokens associated with the account. + if idt.HomeAccountID == homeID && idt.Environment == env { + delete(m.contract.IDTokens, key) + } + } +} + +func (m *Manager) removeAccounts(homeID string, env string) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + for key, acc := range m.contract.Accounts { + // Remove the specified account. + if acc.HomeAccountID == homeID && acc.Environment == env { + delete(m.contract.Accounts, key) + } + } +} + +// update updates the internal cache object. This is for use in tests, other uses are not +// supported. +func (m *Manager) update(cache *Contract) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + m.contract = cache +} + +// Marshal implements cache.Marshaler. 
+func (m *Manager) Marshal() ([]byte, error) { + return json.Marshal(m.contract) +} + +// Unmarshal implements cache.Unmarshaler. +func (m *Manager) Unmarshal(b []byte) error { + m.contractMu.Lock() + defer m.contractMu.Unlock() + + contract := NewContract() + + err := json.Unmarshal(b, contract) + if err != nil { + return err + } + + m.contract = contract + + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json new file mode 100644 index 000000000000..1d8181924d14 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json @@ -0,0 +1,56 @@ +{ + "Account": { + "uid.utid-login.windows.net-contoso": { + "username": "John Doe", + "local_account_id": "object1234", + "realm": "contoso", + "environment": "login.windows.net", + "home_account_id": "uid.utid", + "authority_type": "MSSTS" + } + }, + "RefreshToken": { + "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { + "target": "s2 s1 s3", + "environment": "login.windows.net", + "credential_type": "RefreshToken", + "secret": "a refresh token", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "AccessToken": { + "an-entry": { + "foo": "bar" + }, + "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { + "environment": "login.windows.net", + "credential_type": "AccessToken", + "secret": "an access token", + "realm": "contoso", + "target": "s2 s1 s3", + "client_id": "my_client_id", + "cached_at": "1000", + "home_account_id": "uid.utid", + "extended_expires_on": "4600", + "expires_on": "4600" + } + }, + "IdToken": { + "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { + "realm": "contoso", + "environment": "login.windows.net", + "credential_type": "IdToken", + "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", + "client_id": "my_client_id", + "home_account_id": "uid.utid" + } + }, + "unknownEntity": {"field1":"1","field2":"whats"}, + "AppMetadata": { + "AppMetadata-login.windows.net-my_client_id": { + "environment": "login.windows.net", + "client_id": "my_client_id" + } + } + } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go new file mode 100644 index 000000000000..7b673e3fe126 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// package exported contains internal types that are re-exported from a public package +package exported + +// AssertionRequestOptions has information required to generate a client assertion +type AssertionRequestOptions struct { + // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims. + ClientID string + + // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim. 
+ TokenEndpoint string +} + +// TokenProviderParameters is the authentication parameters passed to token providers +type TokenProviderParameters struct { + // Claims contains any additional claims requested for the token + Claims string + // CorrelationID of the authentication request + CorrelationID string + // Scopes requested for the token + Scopes []string + // TenantID identifies the tenant in which to authenticate + TenantID string +} + +// TokenProviderResult is the authentication result returned by custom token providers +type TokenProviderResult struct { + // AccessToken is the requested token + AccessToken string + // ExpiresInSeconds is the lifetime of the token in seconds + ExpiresInSeconds int +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md new file mode 100644 index 000000000000..09edb01b7e43 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md @@ -0,0 +1,140 @@ +# JSON Package Design +Author: John Doak(jdoak@microsoft.com) + +## Why? + +This project needs a special type of marshal/unmarshal not directly supported +by the encoding/json package. + +The need revolves around a few key wants/needs: +- unmarshal and marshal structs representing JSON messages +- fields in the messgage not in the struct must be maintained when unmarshalled +- those same fields must be marshalled back when encoded again + +The initial version used map[string]interface{} to put in the keys that +were known and then any other keys were put into a field called AdditionalFields. + +This has a few negatives: +- Dual marshaling/unmarshalling is required +- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects +- Tests can become quickly disconnected if those keys aren't put +in tests as well. So you think you have support working, but you +don't. Existing tests were found that didn't test the marshalling output. +- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers +that don't have custom marshal/unmarshal. + +This package aims to support our needs by providing custom Marshal()/Unmarshal() functions. + +This prevents all the negatives in the initial solution listed above. However, it does add its own negative: +- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself) + +Go proverb: Reflection is never clear +Suggested reading: https://blog.golang.org/laws-of-reflection + +## Important design decisions + +- We don't want to understand all JSON decoding rules +- We don't want to deal with all the quoting, commas, etc on decode +- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time +- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined +- We only support root level objects that are \*struct or struct + +To faciliate these goals, we will utilize the json.Encoder and json.Decoder. +They provide streaming processing (efficient) and return errors on bad JSON. + +Support for json.Marshaler/Unmarshaler allows for us to use non-basic types +that must be specially encoded/decoded (like time.Time objects). 
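+
+As an illustration of the round trip this design enables, here is a minimal sketch
+written as if it lived inside this package (the token type and roundTrip helper are
+hypothetical examples, not part of the package):
+
+```go
+// token is a hypothetical message type. JSON keys that are not declared as
+// struct fields are preserved in AdditionalFields across Unmarshal/Marshal.
+type token struct {
+	AccessToken string `json:"access_token"`
+
+	// AdditionalFields collects any keys not declared above.
+	AdditionalFields map[string]interface{}
+}
+
+// roundTrip decodes and re-encodes a message without dropping unknown keys.
+func roundTrip(in []byte) ([]byte, error) {
+	var t token
+	if err := Unmarshal(in, &t); err != nil { // unknown keys land in t.AdditionalFields
+		return nil, err
+	}
+	return Marshal(t) // AdditionalFields are written back out as top-level keys
+}
+```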
+ +We don't support types that can't customer unmarshal or have AdditionalFields +in order to prevent future devs from forgetting that important field and +generating bad return values. + +Support for root level objects of \*struct or struct simply acknowledges the +fact that this is designed only for the purposes listed in the Introduction. +Outside that (like encoding a lone number) should be done with the +regular json package (as it will not have additional fields). + +We don't support a few things on json supported reference types and structs: +- \*map: no need for pointers to maps +- \*slice: no need for pointers to slices +- any further pointers on struct after \*struct + +There should never be a need for this in Go. + +## Design + +## State Machines + +This uses state machine designs that based upon the Rob Pike talk on +lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE + +This is the most common pattern for state machines in Go and +the model to follow closesly when dealing with streaming +processing of textual data. + +Our state machines are based on the type: +```go +type stateFn func() (stateFn, error) +``` + +The state machine itself is simply a struct that has methods that +satisfy stateFn. + +Our state machines have a few standard calls +- run(): runs the state machine +- start(): always the first stateFn to be called + +All state machines have the following logic: +* run() is called +* start() is called and returns the next stateFn or error +* stateFn is called + - If returned stateFn(next state) is non-nil, call it + - If error is non-nil, run() returns the error + - If stateFn == nil and err == nil, run() return err == nil + +## Supporting types + +Marshalling/Unmarshalling must support(within top level struct): +- struct +- \*struct +- []struct +- []\*struct +- []map[string]structContainer +- [][]structContainer + +**Term note:** structContainer == type that has a struct or \*struct inside it + +We specifically do not support []interface or map[string]interface +where the interface value would hold some value with a struct in it. + +Those will still marshal/unmarshal, but without support for +AdditionalFields. + +## Marshalling + +The marshalling design will be based around a statemachine design. + +The basic logic is as follows: + +* If struct has custom marshaller, call it and return +* If struct has field "AdditionalFields", it must be a map[string]interface{} +* If struct does not have "AdditionalFields", give an error +* Get struct tag detailing json names to go names, create mapping +* For each public field name + - Write field name out + - If field value is a struct, recursively call our state machine + - Otherwise, use the json.Encoder to write out the value + +## Unmarshalling + +The unmarshalling desin is also based around a statemachine design. 
The +basic logic is as follows: + +* If struct has custom marhaller, call it +* If struct has field "AdditionalFields", it must be a map[string]interface{} +* Get struct tag detailing json names to go names, create mapping +* For each key found + - If key exists, + - If value is basic type, extract value into struct field using Decoder + - If value is struct type, recursively call statemachine + - If key doesn't exist, add it to AdditionalFields if it exists using Decoder diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go new file mode 100644 index 000000000000..2238521f5f91 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package json provide functions for marshalling an unmarshalling types to JSON. These functions are meant to +// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces. +// This package provides the additional functionality of writing fields that are not in the struct when marshalling +// to a field called AdditionalFields if that field exists and is a map[string]interface{}. +// When marshalling, if the struct has all the same prerequisites, it will uses the keys in AdditionalFields as +// extra fields. This package uses encoding/json underneath. +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +const addField = "AdditionalFields" +const ( + marshalJSON = "MarshalJSON" + unmarshalJSON = "UnmarshalJSON" +) + +var ( + leftBrace = []byte("{")[0] + rightBrace = []byte("}")[0] + comma = []byte(",")[0] + leftParen = []byte("[")[0] + rightParen = []byte("]")[0] +) + +var mapStrInterType = reflect.TypeOf(map[string]interface{}{}) + +// stateFn defines a state machine function. This will be used in all state +// machines in this package. +type stateFn func() (stateFn, error) + +// Marshal is used to marshal a type into its JSON representation. It +// wraps the stdlib calls in order to marshal a struct or *struct so +// that a field called "AdditionalFields" of type map[string]interface{} +// with "-" used inside struct tag `json:"-"` can be marshalled as if +// they were fields within the struct. +func Marshal(i interface{}) ([]byte, error) { + buff := bytes.Buffer{} + enc := json.NewEncoder(&buff) + enc.SetEscapeHTML(false) + enc.SetIndent("", "") + + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + err := marshalStruct(v, &buff, enc) + if err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has +// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct +// will be written as key/value pairs to AdditionalFields. +func Unmarshal(b []byte, i interface{}) error { + if len(b) == 0 { + return nil + } + + jdec := json.NewDecoder(bytes.NewBuffer(b)) + jdec.UseNumber() + return unmarshalStruct(jdec, i) +} + +// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled, +// this will panic. This is exposed to help test AdditionalField values +// which are stored as json.RawMessage. 
+func MarshalRaw(i interface{}) json.RawMessage { + b, err := json.Marshal(i) + if err != nil { + panic(err) + } + return json.RawMessage(b) +} + +// isDelim simply tests to see if a json.Token is a delimeter. +func isDelim(got json.Token) bool { + switch got.(type) { + case json.Delim: + return true + } + return false +} + +// delimIs tests got to see if it is want. +func delimIs(got json.Token, want rune) bool { + switch v := got.(type) { + case json.Delim: + if v == json.Delim(want) { + return true + } + } + return false +} + +// hasMarshalJSON will determine if the value or a pointer to this value has +// the MarshalJSON method. +func hasMarshalJSON(v reflect.Value) bool { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Marshaler) + return ok + } + return false +} + +// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. +// This will panic if the method is not defined. +func callMarshalJSON(v reflect.Value) ([]byte, error) { + if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } else { + if v.CanAddr() { + v = v.Addr() + } + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + marsh := v.Interface().(json.Marshaler) + return marsh.MarshalJSON() + } + + panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) +} + +// hasUnmarshalJSON will determine if the value or a pointer to this value has +// the UnmarshalJSON method. +func hasUnmarshalJSON(v reflect.Value) bool { + // You can't unmarshal on a non-pointer type. + if v.Kind() != reflect.Ptr { + if !v.CanAddr() { + return false + } + v = v.Addr() + } + + if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { + _, ok := v.Interface().(json.Unmarshaler) + return ok + } + + return false +} + +// hasOmitEmpty indicates if the field has instructed us to not output +// the field if omitempty is set on the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). +func hasOmitEmpty(tag string) bool { + sl := strings.Split(tag, ",") + for _, str := range sl { + if str == "omitempty" { + return true + } + } + return false +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go new file mode 100644 index 000000000000..cef442f25c86 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go @@ -0,0 +1,333 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// unmarshalMap unmarshal's a map. 
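+// It panics if m is not a pointer to a map.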
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error { + if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map { + panic("unmarshalMap called on non-*map value") + } + mapValueType := m.Elem().Type().Elem() + walk := mapWalk{dec: dec, m: m, valueType: mapValueType} + if err := walk.run(); err != nil { + return err + } + return nil +} + +type mapWalk struct { + dec *json.Decoder + key string + m reflect.Value + valueType reflect.Type +} + +// run runs our decoder state machine. +func (m *mapWalk) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapWalk) start() (stateFn, error) { + // maps can have custom unmarshaler's. + if hasUnmarshalJSON(m.m) { + err := m.dec.Decode(m.m.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the map value is: + // *struct/struct/map/slice + // otherwise use standard decode + t, _ := m.valueBaseType() + switch t.Kind() { + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := m.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to JSON null. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + return m.next, nil + case reflect.Ptr: + return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference") + } + + // This is a basic map type, so just use Decode(). + if err := m.dec.Decode(m.m.Interface()); err != nil { + return nil, err + } + + return nil, nil +} + +func (m *mapWalk) next() (stateFn, error) { + if m.dec.More() { + key, err := m.dec.Token() + if err != nil { + return nil, err + } + m.key = key.(string) + return m.storeValue, nil + } + // No more entries, so remove final }. + _, err := m.dec.Token() + if err != nil { + return nil, err + } + return nil, nil +} + +func (m *mapWalk) storeValue() (stateFn, error) { + v := m.valueType + for { + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + continue + case reflect.Struct: + return m.storeStruct, nil + case reflect.Map: + return m.storeMap, nil + case reflect.Slice: + return m.storeSlice, nil + } + return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind()) + } +} + +func (m *mapWalk) storeStruct() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalStruct(m.dec, v.Interface()); err != nil { + return nil, err + } + + if m.valueType.Kind() == reflect.Ptr { + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + return m.next, nil + } + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +func (m *mapWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(m.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(m.dec, ptr); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v) + + return m.next, nil +} + +func (m *mapWalk) storeSlice() (stateFn, error) { + v := newValue(m.valueType) + if err := unmarshalSlice(m.dec, v); err != nil { + return nil, err + } + + m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem()) + + return m.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... 
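+// The returned bool reports whether the original value type was a pointer.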
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) { + ptr := false + v := m.valueType + if v.Kind() == reflect.Ptr { + ptr = true + v = v.Elem() + } + return v, ptr +} + +// unmarshalSlice unmarshal's the next value, which must be a slice, into +// ptrSlice, which must be a pointer to a slice. newValue() can be use to +// create the slice. +func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error { + if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice { + panic("unmarshalSlice called on non-*[]slice value") + } + sliceValueType := ptrSlice.Elem().Type().Elem() + walk := sliceWalk{ + dec: dec, + s: ptrSlice, + valueType: sliceValueType, + } + if err := walk.run(); err != nil { + return err + } + + return nil +} + +type sliceWalk struct { + dec *json.Decoder + s reflect.Value // *[]slice + valueType reflect.Type +} + +// run runs our decoder state machine. +func (s *sliceWalk) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceWalk) start() (stateFn, error) { + // slices can have custom unmarshaler's. + if hasUnmarshalJSON(s.s) { + err := s.dec.Decode(s.s.Interface()) + if err != nil { + return nil, err + } + return nil, nil + } + + // We only want to use this if the slice value is: + // []*struct/[]struct/[]map/[]slice + // otherwise use standard decode + t := s.valueBaseType() + + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("cannot unmarshal into a ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + delim, err := s.dec.Token() + if err != nil { + return nil, err + } + // This indicates the value was set to nil. + if delim == nil { + return nil, nil + } + if !delimIs(delim, '[') { + return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim) + } + return s.next, nil + } + + if err := s.dec.Decode(s.s.Interface()); err != nil { + return nil, err + } + return nil, nil +} + +func (s *sliceWalk) next() (stateFn, error) { + if s.dec.More() { + return s.storeValue, nil + } + // Nothing left in the slice, remove closing ] + _, err := s.dec.Token() + return nil, err +} + +func (s *sliceWalk) storeValue() (stateFn, error) { + t := s.valueBaseType() + switch t.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types") + case reflect.Struct: + return s.storeStruct, nil + case reflect.Map: + return s.storeMap, nil + case reflect.Slice: + return s.storeSlice, nil + } + return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind()) +} + +func (s *sliceWalk) storeStruct() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalStruct(s.dec, v.Interface()); err != nil { + return nil, err + } + + if s.valueType.Kind() == reflect.Ptr { + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + return s.next, nil + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + return s.next, nil +} + +func (s *sliceWalk) storeMap() (stateFn, error) { + v := reflect.MakeMap(s.valueType) + ptr := newValue(v.Type()) + ptr.Elem().Set(v) + + if err := unmarshalMap(s.dec, ptr); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v)) + + return s.next, nil +} + +func (s *sliceWalk) storeSlice() (stateFn, error) { + v := newValue(s.valueType) + if err := unmarshalSlice(s.dec, v); err != nil { + return nil, err + } + + s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem())) + + 
return s.next, nil +} + +// valueType returns the underlying Type. So a *struct would yield +// struct, etc... +func (s *sliceWalk) valueBaseType() reflect.Type { + v := s.valueType + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + return v +} + +// newValue() returns a new *type that represents type passed. +func newValue(valueType reflect.Type) reflect.Value { + if valueType.Kind() == reflect.Ptr { + return reflect.New(valueType.Elem()) + } + return reflect.New(valueType) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go new file mode 100644 index 000000000000..df5dc6e11b50 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go @@ -0,0 +1,346 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +// marshalStruct takes in i, which must be a *struct or struct and marshals its content +// as JSON into buff (sometimes with writes to buff directly, sometimes via enc). +// This call is recursive for all fields of *struct or struct type. +func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + // We only care about custom Marshalling a struct. + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface()) + } + + if hasMarshalJSON(v) { + b, err := callMarshalJSON(v) + if err != nil { + return err + } + buff.Write(b) + return nil + } + + t := v.Type() + + // If it has an AdditionalFields field make sure its the right type. + f := v.FieldByName(addField) + if f.Kind() != reflect.Invalid { + if f.Kind() != reflect.Map { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + if !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface()) + } + } + + translator, err := findFields(v) + if err != nil { + return err + } + + buff.WriteByte(leftBrace) + for x := 0; x < v.NumField(); x++ { + field := v.Field(x) + + // We don't access private fields. + if unicode.IsLower(rune(t.Field(x).Name[0])) { + continue + } + + if t.Field(x).Name == addField { + if v.Field(x).Len() > 0 { + if err := writeAddFields(field.Interface(), buff, enc); err != nil { + return err + } + buff.WriteByte(comma) + } + continue + } + + // If they have omitempty set, we don't write out the field if + // it is the zero value. + if hasOmitEmpty(t.Field(x).Tag.Get("json")) { + if v.Field(x).IsZero() { + continue + } + } + + // Write out the field name part. + jsonName := translator.jsonName(t.Field(x).Name) + buff.WriteString(fmt.Sprintf("%q:", jsonName)) + + if field.Kind() == reflect.Ptr { + field = field.Elem() + } + + if err := marshalStructField(field, buff, enc); err != nil { + return err + } + } + + buff.Truncate(buff.Len() - 1) // Remove final comma + buff.WriteByte(rightBrace) + + return nil +} + +func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + // Determine if we need a trailing comma. + defer buff.WriteByte(comma) + + switch field.Kind() { + // If it was a *struct or struct, we need to recursively all marshal(). 
+ case reflect.Struct: + if field.CanAddr() { + field = field.Addr() + } + return marshalStruct(field, buff, enc) + case reflect.Map: + return marshalMap(field, buff, enc) + case reflect.Slice: + return marshalSlice(field, buff, enc) + } + + // It is just a basic type, so encode it. + if err := enc.Encode(field.Interface()); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + return nil +} + +func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Map { + return fmt.Errorf("bug: marshalMap() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftBrace) + buff.WriteByte(rightBrace) + return nil + } + encoder := mapEncode{m: v, buff: buff, enc: enc} + return encoder.run() +} + +type mapEncode struct { + m reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. +func (m *mapEncode) run() error { + var state = m.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (m *mapEncode) start() (stateFn, error) { + if hasMarshalJSON(m.m) { + b, err := callMarshalJSON(m.m) + if err != nil { + return nil, err + } + m.buff.Write(b) + return nil, nil + } + + valueBaseType := m.m.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + m.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return m.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := m.enc.Encode(m.m.Interface()); err != nil { + return nil, err + } + m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n + return nil, nil +} + +func (m *mapEncode) encode() (stateFn, error) { + m.buff.WriteByte(leftBrace) + + iter := m.m.MapRange() + for iter.Next() { + // Write the key. + k := iter.Key() + m.buff.WriteString(fmt.Sprintf("%q:", k.String())) + + v := iter.Value() + switch m.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, m.buff, m.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, m.buff, m.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind())) + } + m.buff.WriteByte(comma) + } + m.buff.Truncate(m.buff.Len() - 1) // Remove final comma + m.buff.WriteByte(rightBrace) + + return nil, nil +} + +func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error { + if v.Kind() != reflect.Slice { + return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface()) + } + if v.Len() == 0 { + buff.WriteByte(leftParen) + buff.WriteByte(rightParen) + return nil + } + encoder := sliceEncode{s: v, buff: buff, enc: enc} + return encoder.run() +} + +type sliceEncode struct { + s reflect.Value + buff *bytes.Buffer + enc *json.Encoder + + valueBaseType reflect.Type +} + +// run runs our encoder state machine. 
+func (s *sliceEncode) run() error { + var state = s.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +func (s *sliceEncode) start() (stateFn, error) { + if hasMarshalJSON(s.s) { + b, err := callMarshalJSON(s.s) + if err != nil { + return nil, err + } + s.buff.Write(b) + return nil, nil + } + + valueBaseType := s.s.Type().Elem() + if valueBaseType.Kind() == reflect.Ptr { + valueBaseType = valueBaseType.Elem() + } + s.valueBaseType = valueBaseType + + switch valueBaseType.Kind() { + case reflect.Ptr: + return nil, fmt.Errorf("Marshal does not support ** or *") + case reflect.Struct, reflect.Map, reflect.Slice: + return s.encode, nil + } + + // If the map value doesn't have a struct/map/slice, just Encode() it. + if err := s.enc.Encode(s.s.Interface()); err != nil { + return nil, err + } + s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n + + return nil, nil +} + +func (s *sliceEncode) encode() (stateFn, error) { + s.buff.WriteByte(leftParen) + for i := 0; i < s.s.Len(); i++ { + v := s.s.Index(i) + switch s.valueBaseType.Kind() { + case reflect.Struct: + if v.CanAddr() { + v = v.Addr() + } + if err := marshalStruct(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Map: + if err := marshalMap(v, s.buff, s.enc); err != nil { + return nil, err + } + case reflect.Slice: + if err := marshalSlice(v, s.buff, s.enc); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind())) + } + s.buff.WriteByte(comma) + } + s.buff.Truncate(s.buff.Len() - 1) // Remove final comma + s.buff.WriteByte(rightParen) + return nil, nil +} + +// writeAddFields writes the AdditionalFields struct field out to JSON as field +// values. i must be a map[string]interface{} or this will panic. +func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error { + m := i.(map[string]interface{}) + + x := 0 + for k, v := range m { + buff.WriteString(fmt.Sprintf("%q:", k)) + if err := enc.Encode(v); err != nil { + return err + } + buff.Truncate(buff.Len() - 1) // Remove Encode() added \n + + if x+1 != len(m) { + buff.WriteByte(comma) + } + x++ + } + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go new file mode 100644 index 000000000000..07751544a282 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go @@ -0,0 +1,290 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package json + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func unmarshalStruct(jdec *json.Decoder, i interface{}) error { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i) + } + + if hasUnmarshalJSON(v) { + // Indicates that this type has a custom Unmarshaler. 
+ return jdec.Decode(v.Addr().Interface()) + } + + f := v.FieldByName(addField) + if f.Kind() == reflect.Invalid { + return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i) + } + + if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) { + return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i) + } + + dec := newDecoder(jdec, v) + return dec.run() +} + +type decoder struct { + dec *json.Decoder + value reflect.Value // This will be a reflect.Struct + translator translateFields + key string +} + +func newDecoder(dec *json.Decoder, value reflect.Value) *decoder { + return &decoder{value: value, dec: dec} +} + +// run runs our decoder state machine. +func (d *decoder) run() error { + var state = d.start + var err error + for { + state, err = state() + if err != nil { + return err + } + if state == nil { + return nil + } + } +} + +// start looks for our opening delimeter '{' and then transitions to looping through our fields. +func (d *decoder) start() (stateFn, error) { + var err error + d.translator, err = findFields(d.value) + if err != nil { + return nil, err + } + + delim, err := d.dec.Token() + if err != nil { + return nil, err + } + if !delimIs(delim, '{') { + return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim) + } + + return d.next, nil +} + +// next gets the next struct field name from the raw json or stops the machine if we get our closing }. +func (d *decoder) next() (stateFn, error) { + if !d.dec.More() { + // Remove the closing }. + if _, err := d.dec.Token(); err != nil { + return nil, err + } + return nil, nil + } + + key, err := d.dec.Token() + if err != nil { + return nil, err + } + + d.key = key.(string) + return d.storeValue, nil +} + +// storeValue takes the next value and stores it our struct. If the field can't be found +// in the struct, it pushes the operation to storeAdditional(). +func (d *decoder) storeValue() (stateFn, error) { + goName := d.translator.goName(d.key) + if goName == "" { + goName = d.key + } + + // We don't have the field in the struct, so it goes in AdditionalFields. + f := d.value.FieldByName(goName) + if f.Kind() == reflect.Invalid { + return d.storeAdditional, nil + } + + // Indicates that this type has a custom Unmarshaler. + if hasUnmarshalJSON(f) { + err := d.dec.Decode(f.Addr().Interface()) + if err != nil { + return nil, err + } + return d.next, nil + } + + t, isPtr, err := fieldBaseType(d.value, goName) + if err != nil { + return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err) + } + + switch t.Kind() { + // We need to recursively call ourselves on any *struct or struct. + case reflect.Struct: + if isPtr { + if f.IsNil() { + f.Set(reflect.New(t)) + } + } else { + f = f.Addr() + } + if err := unmarshalStruct(d.dec, f.Interface()); err != nil { + return nil, err + } + return d.next, nil + case reflect.Map: + v := reflect.MakeMap(f.Type()) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalMap(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + case reflect.Slice: + v := reflect.MakeSlice(f.Type(), 0, 0) + ptr := newValue(f.Type()) + ptr.Elem().Set(v) + if err := unmarshalSlice(d.dec, ptr); err != nil { + return nil, err + } + f.Set(ptr.Elem()) + return d.next, nil + } + + if !isPtr { + f = f.Addr() + } + + // For values that are pointers, we need them to be non-nil in order + // to decode into them. 
+ if f.IsNil() { + f.Set(reflect.New(t)) + } + + if err := d.dec.Decode(f.Interface()); err != nil { + return nil, err + } + + return d.next, nil +} + +// storeAdditional pushes the key/value into our .AdditionalFields map. +func (d *decoder) storeAdditional() (stateFn, error) { + rw := json.RawMessage{} + if err := d.dec.Decode(&rw); err != nil { + return nil, err + } + field := d.value.FieldByName(addField) + if field.IsNil() { + field.Set(reflect.MakeMap(field.Type())) + } + field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw)) + return d.next, nil +} + +func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) { + sf, ok := v.Type().FieldByName(fieldName) + if !ok { + return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name()) + } + t = sf.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + isPtr = true + } + if t.Kind() == reflect.Ptr { + return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported") + } + return t, isPtr, nil +} + +type translateField struct { + jsonName string + goName string +} + +// translateFields is a list of translateFields with a handy lookup method. +type translateFields []translateField + +// goName loops through a list of fields looking for one contaning the jsonName and +// returning the goName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) goName(jsonName string) string { + for _, entry := range t { + if entry.jsonName == jsonName { + return entry.goName + } + } + return "" +} + +// jsonName loops through a list of fields looking for one contaning the goName and +// returning the jsonName. If not found, returns the empty string. +// Note: not a map because at this size slices are faster even in tight loops. +func (t translateFields) jsonName(goName string) string { + for _, entry := range t { + if entry.goName == goName { + return entry.jsonName + } + } + return "" +} + +var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +// findFields parses a struct and writes the field tags for lookup. It will return an error +// if any field has a type of *struct or struct that does not implement json.Marshaler. +func findFields(v reflect.Value) (translateFields, error) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name()) + } + tfs := make([]translateField, 0, v.NumField()) + for i := 0; i < v.NumField(); i++ { + tf := translateField{ + goName: v.Type().Field(i).Name, + jsonName: parseTag(v.Type().Field(i).Tag.Get("json")), + } + switch tf.jsonName { + case "", "-": + tf.jsonName = tf.goName + } + tfs = append(tfs, tf) + + f := v.Field(i) + if f.Kind() == reflect.Ptr { + f = f.Elem() + } + if f.Kind() == reflect.Struct { + if f.Type().Implements(umarshalerType) { + return nil, fmt.Errorf("struct type %q which has field %q which "+ + "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name) + } + } + } + return tfs, nil +} + +// parseTag just returns the first entry in the tag. tag is the string +// returned by reflect.StructField.Tag().Get(). 
+func parseTag(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go new file mode 100644 index 000000000000..a1c99621e9fc --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package time provides for custom types to translate time from JSON and other formats +// into time.Time objects. +package time + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Unix provides a type that can marshal and unmarshal a string representation +// of the unix epoch into a time.Time object. +type Unix struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (u Unix) MarshalJSON() ([]byte, error) { + if u.T.IsZero() { + return []byte(""), nil + } + return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (u *Unix) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + u.T = time.Unix(int64(i), 0) + return nil +} + +// DurationTime provides a type that can marshal and unmarshal a string representation +// of a duration from now into a time.Time object. +// Note: I'm not sure this is the best way to do this. What happens is we get a field +// called "expires_in" that represents the seconds from now that this expires. We +// turn that into a time we call .ExpiresOn. But maybe we should be recording +// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration. +// Then we could have a method called ExpiresOn(). Honestly, the whole thing is +// bad because the server doesn't return a concrete time. I think this is +// cleaner, but its not great either. +type DurationTime struct { + T time.Time +} + +// MarshalJSON implements encoding/json.MarshalJSON(). +func (d DurationTime) MarshalJSON() ([]byte, error) { + if d.T.IsZero() { + return []byte(""), nil + } + + dt := time.Until(d.T) + return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil +} + +// UnmarshalJSON implements encoding/json.UnmarshalJSON(). +func (d *DurationTime) UnmarshalJSON(b []byte) error { + i, err := strconv.Atoi(strings.Trim(string(b), `"`)) + if err != nil { + return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err) + } + d.T = time.Now().Add(time.Duration(i) * time.Second) + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go new file mode 100644 index 000000000000..04236ff3127a --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package local contains a local HTTP server used with interactive authentication. 
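+// The server listens on localhost, serves the OAuth redirect endpoint, and hands the
+// resulting authorization code (or error) back to the caller through Result.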
+package local + +import ( + "context" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" +) + +var okPage = []byte(` +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8" /> + <title>Authentication Complete</title> +</head> +<body>
+ <p>Authentication complete. You can return to the application. Feel free to close this browser tab.</p>
+</body> +</html> +`) + +const failPage = ` +<!DOCTYPE html> +<html> +<head> + <meta charset="utf-8" /> + <title>Authentication Failed</title> +</head> +<body>
+ <p>Authentication failed. You can return to the application. Feel free to close this browser tab.</p>
+ <p>Error details: error %s error_description: %s</p>
+</body> +</html> +` + +// Result is the result from the redirect. +type Result struct { + // Code is the code sent by the authority server. + Code string + // Err is set if there was an error. + Err error +} + +// Server is an HTTP server. +type Server struct { + // Addr is the address the server is listening on. + Addr string + resultCh chan Result + s *http.Server + reqState string +} + +// New creates a local HTTP server and starts it. +func New(reqState string, port int) (*Server, error) { + var l net.Listener + var err error + var portStr string + if port > 0 { + // use port provided by caller + l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + portStr = strconv.FormatInt(int64(port), 10) + } else { + // find a free port + for i := 0; i < 10; i++ { + l, err = net.Listen("tcp", "localhost:0") + if err != nil { + continue + } + addr := l.Addr().String() + portStr = addr[strings.LastIndex(addr, ":")+1:] + break + } + } + if err != nil { + return nil, err + } + + serv := &Server{ + Addr: fmt.Sprintf("http://localhost:%s", portStr), + s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second}, + reqState: reqState, + resultCh: make(chan Result, 1), + } + serv.s.Handler = http.HandlerFunc(serv.handler) + + if err := serv.start(l); err != nil { + return nil, err + } + + return serv, nil +} + +func (s *Server) start(l net.Listener) error { + go func() { + err := s.s.Serve(l) + if err != nil { + select { + case s.resultCh <- Result{Err: err}: + default: + } + } + }() + + return nil +} + +// Result gets the result of the redirect operation. Once a single result is returned, the server +// is shutdown. ctx deadline will be honored. +func (s *Server) Result(ctx context.Context) Result { + select { + case <-ctx.Done(): + return Result{Err: ctx.Err()} + case r := <-s.resultCh: + return r + } +} + +// Shutdown shuts down the server. +func (s *Server) Shutdown() { + // Note: You might get clever and think you can do this in handler() as a defer, you can't. + _ = s.s.Shutdown(context.Background()) +} + +func (s *Server) putResult(r Result) { + select { + case s.resultCh <- r: + default: + } +} + +func (s *Server) handler(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + + headerErr := q.Get("error") + if headerErr != "" { + desc := q.Get("error_description") + // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, + // change this to s.error() and make s.error() write the failPage instead of an error code. + _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) + s.putResult(Result{Err: fmt.Errorf(desc)}) + return + } + + respState := q.Get("state") + switch respState { + case s.reqState: + case "": + s.error(w, http.StatusInternalServerError, "server didn't send OAuth state") + return + default: + s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState) + return + } + + code := q.Get("code") + if code == "" { + s.error(w, http.StatusInternalServerError, "authorization code missing in query string") + return + } + + _, _ = w.Write(okPage) + s.putResult(Result{Code: code}) +} + +func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) { + err := fmt.Errorf(str, i...)
+ http.Error(w, err.Error(), code) + s.putResult(Result{Err: err}) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go new file mode 100644 index 000000000000..6b4016c1166f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -0,0 +1,296 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package oauth + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" + "github.com/google/uuid" +) + +// ResolveEndpointer contains the methods for resolving authority endpoints. +type ResolveEndpointer interface { + ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) +} + +// AccessTokens contains the methods for fetching tokens from different sources. +type AccessTokens interface { + DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error) + FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error) + FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) + FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error) + FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error) + FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error) + FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error) + FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error) + FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error) + FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error) +} + +// FetchAuthority will be implemented by authority.Authority. 
+type FetchAuthority interface { + UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error) + AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error) +} + +// FetchWSTrust contains the methods for interacting with WSTrust endpoints. +type FetchWSTrust interface { + Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) + SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error) +} + +// Client provides tokens for various types of token requests. +type Client struct { + Resolver ResolveEndpointer + AccessTokens AccessTokens + Authority FetchAuthority + WSTrust FetchWSTrust +} + +// New is the constructor for Token. +func New(httpClient ops.HTTPClient) *Client { + r := ops.New(httpClient) + return &Client{ + Resolver: newAuthorityEndpoint(r), + AccessTokens: r.AccessTokens(), + Authority: r.Authority(), + WSTrust: r.WSTrust(), + } +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance. +func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName) +} + +func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) { + return t.Authority.AADInstanceDiscovery(ctx, authorityInfo) +} + +// AuthCode returns a token based on an authorization code. +func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) { + if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + tResp, err := t.AccessTokens.FromAuthCode(ctx, req) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err) + } + return tResp, nil +} + +// Credential acquires a token from the authority using a client credentials grant. +func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if cred.TokenProvider != nil { + now := time.Now() + scopes := make([]string, len(authParams.Scopes)) + copy(scopes, authParams.Scopes) + params := exported.TokenProviderParameters{ + CorrelationID: uuid.New().String(), + Scopes: scopes, + } + tr, err := cred.TokenProvider(ctx, params) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return accesstokens.TokenResponse{ + AccessToken: tr.AccessToken, + ExpiresOn: internalTime.DurationTime{ + T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), + }, + GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, + }, nil + } + + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret) + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromAssertion(ctx, authParams, jwt) +} + +// Credential acquires a token from the authority using a client credentials grant. 
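A hedged sketch of the client-credentials path through the Credential method above; the client ID, scope value, and secret source are placeholders, not part of the vendored change:

func appToken(ctx context.Context, cl *oauth.Client, info authority.Info) (string, error) {
	cred := &accesstokens.Credential{Secret: os.Getenv("CLIENT_SECRET")} // assumption: secret injected via the environment
	params := authority.NewAuthParams("client-id", info)                 // "client-id" is a placeholder
	params.Scopes = []string{"https://graph.microsoft.com/.default"}
	params.AuthorizationType = authority.ATClientCredentials
	params.IsConfidentialClient = true

	tr, err := cl.Credential(ctx, params, cred)
	if err != nil {
		return "", err
	}
	return tr.AccessToken, nil // tr.ExpiresOn carries the computed expiry
}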
+func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) { + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + if cred.Secret != "" { + return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret) + + } + jwt, err := cred.JWT(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt) +} + +func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) { + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + return t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret) +} + +// UsernamePassword retrieves a token where a username and password is used. However, if this is +// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password. +func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) { + if authParams.AuthorityInfo.AuthorityType == authority.ADFS { + if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil { + return accesstokens.TokenResponse{}, err + } + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return accesstokens.TokenResponse{}, err + } + + userRealm, err := t.Authority.UserRealm(ctx, authParams) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm(user: %s) from authority: %w", authParams.Username, err) + } + + switch userRealm.AccountType { + case authority.Federated: + mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err) + } + + saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint) + if err != nil { + return accesstokens.TokenResponse{}, fmt.Errorf("problem getting SAML token info: %w", err) + } + return t.AccessTokens.FromSamlGrant(ctx, authParams, saml) + case authority.Managed: + return t.AccessTokens.FromUsernamePassword(ctx, authParams) + } + return accesstokens.TokenResponse{}, errors.New("unknown account type") +} + +// DeviceCode is the result of a call to Token.DeviceCode(). +type DeviceCode struct { + // Result is the device code result from the first call in the device code flow. This allows + // the caller to retrieve the displayed code that is used to authorize on the second device. + Result accesstokens.DeviceCodeResult + authParams authority.AuthParams + + accessTokens AccessTokens +} + +// Token returns a token AFTER the user uses the user code on the second device. This will block +// until either: (1) the code is input by the user and the service releases a token, (2) the token +// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service +// error occurs. 
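A hedged sketch of the two-legged device code flow built from the DeviceCode type above and the Token/DeviceCode methods below; the display step is illustrative:

func deviceCodeToken(ctx context.Context, cl *oauth.Client, params authority.AuthParams) (accesstokens.TokenResponse, error) {
	dc, err := cl.DeviceCode(ctx, params) // first leg: obtain user_code, verification URL, and message
	if err != nil {
		return accesstokens.TokenResponse{}, err
	}
	fmt.Println(dc.Result.Message) // shown to the user so they can sign in on a second device

	// Second leg: Token() polls the token endpoint until the user completes sign-in,
	// the code expires, or ctx is cancelled.
	return dc.Token(ctx)
}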
+func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) { + if d.accessTokens == nil { + return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. DeviceCode is not valid") + } + + var cancel context.CancelFunc + d.Result.ExpiresOn.Sub(time.Now().UTC()) + if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) { + ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + var interval = 50 * time.Millisecond + timer := time.NewTimer(interval) + defer timer.Stop() + + for { + timer.Reset(interval) + select { + case <-ctx.Done(): + return accesstokens.TokenResponse{}, ctx.Err() + case <-timer.C: + interval += interval * 2 + if interval > 5*time.Second { + interval = 5 * time.Second + } + } + + token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result) + if err != nil && isWaitDeviceCodeErr(err) { + continue + } + return token, err // This handles if it was a non-wait error or success + } +} + +type deviceCodeError struct { + Error string `json:"error"` +} + +func isWaitDeviceCodeErr(err error) bool { + var c errors.CallErr + if !errors.As(err, &c) { + return false + } + if c.Resp.StatusCode != 400 { + return false + } + var dCErr deviceCodeError + defer c.Resp.Body.Close() + body, err := io.ReadAll(c.Resp.Body) + if err != nil { + return false + } + err = json.Unmarshal(body, &dCErr) + if err != nil { + return false + } + if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" { + return true + } + return false +} + +// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second +// device and optionally the token once the code has been entered on the second device. +func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) { + if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { + return DeviceCode{}, err + } + + dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams) + if err != nil { + return DeviceCode{}, err + } + + return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil +} + +func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { + endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) + if err != nil { + return fmt.Errorf("unable to resolve an endpoint: %s", err) + } + authParams.Endpoints = endpoints + return nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go new file mode 100644 index 000000000000..eaeb2ef5f08b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -0,0 +1,412 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package accesstokens exposes a REST client for querying backend systems to get various types of +access tokens (oauth) for use in authentication. + +These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to +represent arguments and then encode them into the POST body message. We receive JSON in +return for the requests. 
The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 . +*/ +package accesstokens + +import ( + "context" + "crypto" + + /* #nosec */ + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" + "github.com/golang-jwt/jwt/v4" + "github.com/google/uuid" +) + +const ( + grantType = "grant_type" + deviceCode = "device_code" + clientID = "client_id" + clientInfo = "client_info" + clientInfoVal = "1" + username = "username" + password = "password" +) + +//go:generate stringer -type=AppType + +// AppType is whether the authorization code flow is for a public or confidential client. +type AppType int8 + +const ( + // ATUnknown is the zero value when the type hasn't been set. + ATUnknown AppType = iota + // ATPublic indicates this if for the Public.Client. + ATPublic + // ATConfidential indicates this if for the Confidential.Client. + ATConfidential +) + +type urlFormCaller interface { + URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error +} + +// DeviceCodeResponse represents the HTTP response received from the device code endpoint +type DeviceCodeResponse struct { + authority.OAuthResponseBase + + UserCode string `json:"user_code"` + DeviceCode string `json:"device_code"` + VerificationURL string `json:"verification_url"` + ExpiresIn int `json:"expires_in"` + Interval int `json:"interval"` + Message string `json:"message"` + + AdditionalFields map[string]interface{} +} + +// Convert converts the DeviceCodeResponse to a DeviceCodeResult +func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult { + expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second) + return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes) +} + +// Credential represents the credential used in confidential client flows. This can be either +// a Secret or Cert/Key. +type Credential struct { + // Secret contains the credential secret if we are doing auth by secret. + Secret string + + // Cert is the public certificate, if we're authenticating by certificate. + Cert *x509.Certificate + // Key is the private key for signing, if we're authenticating by certificate. + Key crypto.PrivateKey + // X5c is the JWT assertion's x5c header value, required for SN/I authentication. + X5c []string + + // AssertionCallback is a function provided by the application, if we're authenticating by assertion. + AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error) + + // TokenProvider is a function provided by the application that implements custom authentication + // logic for a confidential client + TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error) +} + +// JWT gets the jwt assertion when the credential is not using a secret. 
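A hedged sketch of how a certificate-based Credential is assembled before the JWT method below signs the assertion; loading the DER bytes and private key is assumed to happen elsewhere:

func certCredential(certDER []byte, key crypto.PrivateKey) (*accesstokens.Credential, error) {
	cert, err := x509.ParseCertificate(certDER) // assumption: DER bytes supplied by the caller
	if err != nil {
		return nil, err
	}
	return &accesstokens.Credential{
		Cert: cert, // public cert; its SHA-1 thumbprint becomes the x5t JWT header
		Key:  key,  // matching private key used to RS256-sign the assertion
	}, nil
}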
+func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) { + if c.AssertionCallback != nil { + options := exported.AssertionRequestOptions{ + ClientID: authParams.ClientID, + TokenEndpoint: authParams.Endpoints.TokenEndpoint, + } + return c.AssertionCallback(ctx, options) + } + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + "aud": authParams.Endpoints.TokenEndpoint, + "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), + "iss": authParams.ClientID, + "jti": uuid.New().String(), + "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), + "sub": authParams.ClientID, + }) + token.Header = map[string]interface{}{ + "alg": "RS256", + "typ": "JWT", + "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + } + + if authParams.SendX5C { + token.Header["x5c"] = c.X5c + } + + assertion, err := token.SignedString(c.Key) + if err != nil { + return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + } + return assertion, nil +} + +// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. +// https://tools.ietf.org/html/rfc7517#section-4.8 +func thumbprint(cert *x509.Certificate) []byte { + /* #nosec */ + a := sha1.Sum(cert.Raw) + return a[:] +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm urlFormCaller + + testing bool +} + +// FromUsernamePassword uses a username and password to get an access token. +func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.Password) + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +// AuthCodeRequest stores the values required to request a token from the authority using an authorization code +type AuthCodeRequest struct { + AuthParams authority.AuthParams + Code string + CodeChallenge string + Credential *Credential + AppType AppType +} + +// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge.. +func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) { + if appType == ATUnknown { + return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown") + } + return AuthCodeRequest{ + AuthParams: params, + AppType: appType, + Code: code, + CodeChallenge: challenge, + Credential: cc, + }, nil +} + +// FromAuthCode uses an authorization code to retrieve an access token. 
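A hedged sketch of redeeming an authorization code through FromAuthCode below, driven via oauth.Client.AuthCode for a public client; params.Redirecturi is assumed to already match the redirect used to obtain the code:

func redeemAuthCode(ctx context.Context, cl *oauth.Client, params authority.AuthParams, code, verifier string) (accesstokens.TokenResponse, error) {
	// Public-client exchange: no Credential; the PKCE verifier is sent as code_verifier.
	req, err := accesstokens.NewCodeChallengeRequest(params, accesstokens.ATPublic, nil, code, verifier)
	if err != nil {
		return accesstokens.TokenResponse{}, err
	}
	return cl.AuthCode(ctx, req) // resolves endpoints, then posts the authorization_code grant
}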
+func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) { + var qv url.Values + + switch req.AppType { + case ATUnknown: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown") + case ATConfidential: + var err error + if req.Credential == nil { + return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app") + } + qv, err = prepURLVals(ctx, req.Credential, req.AuthParams) + if err != nil { + return TokenResponse{}, err + } + case ATPublic: + qv = url.Values{} + default: + return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType) + } + + qv.Set(grantType, grant.AuthCode) + qv.Set("code", req.Code) + qv.Set("code_verifier", req.CodeChallenge) + qv.Set("redirect_uri", req.AuthParams.Redirecturi) + qv.Set(clientID, req.AuthParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, req.AuthParams) + + return c.doTokenResp(ctx, req.AuthParams, qv) +} + +// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token. +func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) { + qv := url.Values{} + if appType == ATConfidential { + var err error + qv, err = prepURLVals(ctx, cc, authParams) + if err != nil { + return TokenResponse{}, err + } + } + qv.Set(grantType, grant.RefreshToken) + qv.Set(clientID, authParams.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("refresh_token", refreshToken) + addScopeQueryParam(qv, authParams) + + return c.doTokenResp(ctx, authParams, qv) +} + +// FromClientSecret uses a client's secret (aka password) to get a new token. 
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_secret", clientSecret) + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromClientSecret(): %w", err) + } + return token, nil +} + +func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.ClientCredential) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + token, err := c.doTokenResp(ctx, authParameters, qv) + if err != nil { + return token, fmt.Errorf("FromAssertion(): %w", err) + } + return token, nil +} + +func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.JWT) + qv.Set(clientID, authParameters.ClientID) + qv.Set("client_secret", clientSecret) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.JWT) + qv.Set("client_assertion_type", grant.ClientAssertion) + qv.Set("client_assertion", assertion) + qv.Set(clientID, authParameters.ClientID) + qv.Set("assertion", userAssertion) + qv.Set(clientInfo, clientInfoVal) + qv.Set("requested_token_use", "on_behalf_of") + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) { + qv := url.Values{} + qv.Set(clientID, authParameters.ClientID) + addScopeQueryParam(qv, authParameters) + + endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1) + + resp := DeviceCodeResponse{} + err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp) + if err != nil { + return DeviceCodeResult{}, err + } + + return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil +} + +func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) { + qv := url.Values{} + qv.Set(grantType, grant.DeviceCode) + qv.Set(deviceCode, deviceCodeResult.DeviceCode) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + addScopeQueryParam(qv, authParameters) + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) { + qv := url.Values{} + qv.Set(username, authParameters.Username) + qv.Set(password, authParameters.Password) + qv.Set(clientID, authParameters.ClientID) + qv.Set(clientInfo, clientInfoVal) + qv.Set("assertion", 
base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion))) + addScopeQueryParam(qv, authParameters) + + switch samlGrant.AssertionType { + case grant.SAMLV1: + qv.Set(grantType, grant.SAMLV1) + case grant.SAMLV2: + qv.Set(grantType, grant.SAMLV2) + default: + return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType) + } + + return c.doTokenResp(ctx, authParameters, qv) +} + +func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { + resp := TokenResponse{} + err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) + if err != nil { + return resp, err + } + resp.ComputeScope(authParams) + if c.testing { + return resp, nil + } + return resp, resp.Validate() +} + +// prepURLVals returns an url.Values that sets various key/values if we are doing secrets +// or JWT assertions. +func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) { + params := url.Values{} + if cc.Secret != "" { + params.Set("client_secret", cc.Secret) + return params, nil + } + + jwt, err := cc.JWT(ctx, authParams) + if err != nil { + return nil, err + } + params.Set("client_assertion", jwt) + params.Set("client_assertion_type", grant.ClientAssertion) + return params, nil +} + +// openid required to get an id token +// offline_access required to get a refresh token +// profile required to get the client_info field back +var detectDefaultScopes = map[string]bool{ + "openid": true, + "offline_access": true, + "profile": true, +} + +var defaultScopes = []string{"openid", "offline_access", "profile"} + +func AppendDefaultScopes(authParameters authority.AuthParams) []string { + scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes)) + for _, scope := range authParameters.Scopes { + s := strings.TrimSpace(scope) + if s == "" { + continue + } + if detectDefaultScopes[scope] { + continue + } + scopes = append(scopes, scope) + } + scopes = append(scopes, defaultScopes...) + return scopes +} + +func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) { + scopes := AppendDefaultScopes(authParameters) + queryParams.Set("scope", strings.Join(scopes, " ")) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go new file mode 100644 index 000000000000..3bec4a67cf10 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=AppType"; DO NOT EDIT. + +package accesstokens + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATPublic-1] + _ = x[ATConfidential-2] +} + +const _AppType_name = "ATUnknownATPublicATConfidential" + +var _AppType_index = [...]uint8{0, 9, 17, 31} + +func (i AppType) String() string { + if i < 0 || i >= AppType(len(_AppType_index)-1) { + return "AppType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AppType_name[_AppType_index[i]:_AppType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go new file mode 100644 index 000000000000..cc847001979d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -0,0 +1,332 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package accesstokens + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// IDToken consists of all the information used to validate a user. +// https://docs.microsoft.com/azure/active-directory/develop/id-tokens . +type IDToken struct { + PreferredUsername string `json:"preferred_username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + Oid string `json:"oid,omitempty"` + TenantID string `json:"tid,omitempty"` + Subject string `json:"sub,omitempty"` + UPN string `json:"upn,omitempty"` + Email string `json:"email,omitempty"` + AlternativeID string `json:"alternative_id,omitempty"` + Issuer string `json:"iss,omitempty"` + Audience string `json:"aud,omitempty"` + ExpirationTime int64 `json:"exp,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + RawToken string + + AdditionalFields map[string]interface{} +} + +var null = []byte("null") + +// UnmarshalJSON implements json.Unmarshaler. +func (i *IDToken) UnmarshalJSON(b []byte) error { + if bytes.Equal(null, b) { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type idToken2 IDToken + + jwt := strings.Trim(string(b), `"`) + jwtArr := strings.Split(jwt, ".") + if len(jwtArr) < 2 { + return errors.New("IDToken returned from server is invalid") + } + + jwtPart := jwtArr[1] + jwtDecoded, err := decodeJWT(jwtPart) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err) + } + + token := idToken2{} + err = json.Unmarshal(jwtDecoded, &token) + if err != nil { + return fmt.Errorf("unable to unmarshal IDToken: %w", err) + } + token.RawToken = jwt + + *i = IDToken(token) + return nil +} + +// IsZero indicates if the IDToken is the zero value. 
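A minimal sketch of the decoding step IDToken.UnmarshalJSON above relies on: the middle segment of a compact JWT is unpadded base64url-encoded JSON (the function name here is illustrative):

func claimsJSON(rawJWT string) ([]byte, error) {
	parts := strings.Split(rawJWT, ".")
	if len(parts) < 2 {
		return nil, errors.New("not a compact JWT")
	}
	// Segments are base64url without padding (RFC 7515), hence RawURLEncoding.
	return base64.RawURLEncoding.DecodeString(parts[1])
}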
+func (i IDToken) IsZero() bool { + v := reflect.ValueOf(i) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// LocalAccountID extracts an account's local account ID from an ID token. +func (i IDToken) LocalAccountID() string { + if i.Oid != "" { + return i.Oid + } + return i.Subject +} + +// jwtDecoder is provided to allow tests to provide their own. +var jwtDecoder = decodeJWT + +// ClientInfo is used to create a Home Account ID for an account. +type ClientInfo struct { + UID string `json:"uid"` + UTID string `json:"utid"` + + AdditionalFields map[string]interface{} +} + +// UnmarshalJSON implements json.Unmarshaler.s +func (c *ClientInfo) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), `"`) + // Client info may be empty in some flows, e.g. certificate exchange. + if len(s) == 0 { + return nil + } + + // Because we have a custom unmarshaler, you + // cannot directly call json.Unmarshal here. If you do, it will call this function + // recursively until reach our recursion limit. We have to create a new type + // that doesn't have this method in order to use json.Unmarshal. + type clientInfo2 ClientInfo + + raw, err := jwtDecoder(s) + if err != nil { + return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err) + } + + var c2 clientInfo2 + + err = json.Unmarshal(raw, &c2) + if err != nil { + return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err) + } + + *c = ClientInfo(c2) + return nil +} + +// HomeAccountID creates the home account ID. +func (c ClientInfo) HomeAccountID() string { + if c.UID == "" || c.UTID == "" { + return "" + } + return fmt.Sprintf("%s.%s", c.UID, c.UTID) +} + +// Scopes represents scopes in a TokenResponse. +type Scopes struct { + Slice []string +} + +// UnmarshalJSON implements json.Unmarshal. +func (s *Scopes) UnmarshalJSON(b []byte) error { + str := strings.Trim(string(b), `"`) + if len(str) == 0 { + return nil + } + sl := strings.Split(str, " ") + s.Slice = sl + return nil +} + +// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow. +type TokenResponse struct { + authority.OAuthResponseBase + + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + FamilyID string `json:"foci"` + IDToken IDToken `json:"id_token"` + ClientInfo ClientInfo `json:"client_info"` + ExpiresOn internalTime.DurationTime `json:"expires_in"` + ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` + GrantedScopes Scopes `json:"scope"` + DeclinedScopes []string // This is derived + + AdditionalFields map[string]interface{} + + scopesComputed bool +} + +// ComputeScope computes the final scopes based on what was granted by the server and +// what our AuthParams were from the authority server. 
Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted +// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat +// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3 +func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { + if len(tr.GrantedScopes.Slice) == 0 { + tr.GrantedScopes = Scopes{Slice: authParams.Scopes} + } else { + tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice) + } + tr.scopesComputed = true +} + +// Validate validates the TokenResponse has basic valid values. It must be called +// after ComputeScopes() is called. +func (tr *TokenResponse) Validate() error { + if tr.Error != "" { + return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription) + } + + if tr.AccessToken == "" { + return errors.New("response is missing access_token") + } + + if !tr.scopesComputed { + return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called") + } + return nil +} + +func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { + if authParams.AuthorizationType == authority.ATOnBehalfOf { + return authParams.AssertionHash() + } + if authParams.AuthorizationType == authority.ATClientCredentials { + return authParams.AppKey() + } + if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { + return tr.ClientInfo.HomeAccountID() + } + return "" +} + +func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string { + declined := []string{} + grantedMap := map[string]bool{} + for _, s := range grantedScopes { + grantedMap[strings.ToLower(s)] = true + } + // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined. + for _, r := range requestedScopes { + if !grantedMap[strings.ToLower(r)] { + declined = append(declined, r) + } + } + return declined +} + +// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object +// JWT has headers and payload base64url encoded without padding +// https://tools.ietf.org/html/rfc7519#section-3 and +// https://tools.ietf.org/html/rfc7515#section-2 +func decodeJWT(data string) ([]byte, error) { + // https://tools.ietf.org/html/rfc7515#appendix-C + return base64.RawURLEncoding.DecodeString(data) +} + +// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage. +type RefreshToken struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + CredentialType string `json:"credential_type,omitempty"` + ClientID string `json:"client_id,omitempty"` + FamilyID string `json:"family_id,omitempty"` + Secret string `json:"secret,omitempty"` + Realm string `json:"realm,omitempty"` + Target string `json:"target,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewRefreshToken is the constructor for RefreshToken. +func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken { + return RefreshToken{ + HomeAccountID: homeID, + Environment: env, + CredentialType: "RefreshToken", + ClientID: clientID, + FamilyID: familyID, + Secret: refreshToken, + } +} + +// Key outputs the key that can be used to uniquely look up this entry in a map. 
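A hedged sketch of the scope post-processing (ComputeScope/Validate) defined above; the scope values are illustrative:

func scopesExample() error {
	params := authority.AuthParams{Scopes: []string{"User.Read", "Mail.Read"}}
	tr := accesstokens.TokenResponse{
		AccessToken:   "opaque-token", // placeholder
		GrantedScopes: accesstokens.Scopes{Slice: []string{"User.Read"}},
	}
	tr.ComputeScope(params) // DeclinedScopes is now ["Mail.Read"]
	return tr.Validate()    // Validate refuses to pass until ComputeScope has run
}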
+func (rt RefreshToken) Key() string { + var fourth = rt.FamilyID + if fourth == "" { + fourth = rt.ClientID + } + + return strings.Join( + []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, + shared.CacheKeySeparator, + ) +} + +func (rt RefreshToken) GetSecret() string { + return rt.Secret +} + +// DeviceCodeResult stores the response from the STS device code endpoint. +type DeviceCodeResult struct { + // UserCode is the code the user needs to provide when authentication at the verification URI. + UserCode string + // DeviceCode is the code used in the access token request. + DeviceCode string + // VerificationURL is the the URL where user can authenticate. + VerificationURL string + // ExpiresOn is the expiration time of device code in seconds. + ExpiresOn time.Time + // Interval is the interval at which the STS should be polled at. + Interval int + // Message is the message which should be displayed to the user. + Message string + // ClientID is the UUID issued by the authorization server for your application. + ClientID string + // Scopes is the OpenID scopes used to request access a protected API. + Scopes []string +} + +// NewDeviceCodeResult creates a DeviceCodeResult instance. +func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult { + return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes} +} + +func (dcr DeviceCodeResult) String() string { + return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message) + +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go new file mode 100644 index 000000000000..4724d944ff88 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -0,0 +1,421 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package authority + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/google/uuid" +) + +const ( + authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" + instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" + regionName = "REGION_NAME" + defaultAPIVersion = "2021-10-01" + imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion + defaultHost = "login.microsoftonline.com" + autoDetectRegion = "TryAutoDetect" +) + +type jsonCaller interface { + JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error +} + +var aadTrustedHostList = map[string]bool{ + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.chinacloudapi.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide + "login.cloudgovapi.us": true, // Microsoft Azure US Government +} + +// TrustedHost checks if an AAD host is trusted/valid. +func TrustedHost(host string) bool { + if _, ok := aadTrustedHostList[host]; ok { + return true + } + return false +} + +type OAuthResponseBase struct { + Error string `json:"error"` + SubError string `json:"suberror"` + ErrorDescription string `json:"error_description"` + ErrorCodes []int `json:"error_codes"` + CorrelationID string `json:"correlation_id"` + Claims string `json:"claims"` +} + +// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint. +type TenantDiscoveryResponse struct { + OAuthResponseBase + + AuthorizationEndpoint string `json:"authorization_endpoint"` + TokenEndpoint string `json:"token_endpoint"` + Issuer string `json:"issuer"` + + AdditionalFields map[string]interface{} +} + +// Validate validates that the response had the correct values required. +func (r *TenantDiscoveryResponse) Validate() error { + switch "" { + case r.AuthorizationEndpoint: + return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration") + case r.TokenEndpoint: + return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration") + case r.Issuer: + return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration") + } + return nil +} + +type InstanceDiscoveryMetadata struct { + PreferredNetwork string `json:"preferred_network"` + PreferredCache string `json:"preferred_cache"` + Aliases []string `json:"aliases"` + + AdditionalFields map[string]interface{} +} + +type InstanceDiscoveryResponse struct { + TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"` + Metadata []InstanceDiscoveryMetadata `json:"metadata"` + + AdditionalFields map[string]interface{} +} + +//go:generate stringer -type=AuthorizeType + +// AuthorizeType represents the type of token flow. +type AuthorizeType int + +// These are all the types of token flows. 
+const ( + ATUnknown AuthorizeType = iota + ATUsernamePassword + ATWindowsIntegrated + ATAuthCode + ATInteractive + ATClientCredentials + ATDeviceCode + ATRefreshToken + AccountByID + ATOnBehalfOf +) + +// These are all authority types +const ( + AAD = "MSSTS" + ADFS = "ADFS" +) + +// AuthParams represents the parameters used for authorization for token acquisition. +type AuthParams struct { + AuthorityInfo Info + CorrelationID string + Endpoints Endpoints + ClientID string + // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow). + Redirecturi string + HomeAccountID string + // Username is the user-name portion for username/password auth flow. + Username string + // Password is the password portion for username/password auth flow. + Password string + // Scopes is the list of scopes the user consents to. + Scopes []string + // AuthorizationType specifies the auth flow being used. + AuthorizationType AuthorizeType + // State is a random value used to prevent cross-site request forgery attacks. + State string + // CodeChallenge is derived from a code verifier and is sent in the auth request. + CodeChallenge string + // CodeChallengeMethod describes the method used to create the CodeChallenge. + CodeChallengeMethod string + // Prompt specifies the user prompt type during interactive auth. + Prompt string + // IsConfidentialClient specifies if it is a confidential client. + IsConfidentialClient bool + // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS. + SendX5C bool + // UserAssertion is the access token used to acquire token on behalf of user + UserAssertion string + + // KnownAuthorityHosts don't require metadata discovery because they're known to the user + KnownAuthorityHosts []string +} + +// NewAuthParams creates an authorization parameters object. +func NewAuthParams(clientID string, authorityInfo Info) AuthParams { + return AuthParams{ + ClientID: clientID, + AuthorityInfo: authorityInfo, + CorrelationID: uuid.New().String(), + } +} + +// Info consists of information about the authority. +type Info struct { + Host string + CanonicalAuthorityURI string + AuthorityType string + UserRealmURIPrefix string + ValidateAuthority bool + Tenant string + Region string +} + +func firstPathSegment(u *url.URL) (string, error) { + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) >= 2 { + return pathParts[1], nil + } + + return "", errors.New("authority does not have two segments") +} + +// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
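A hedged sketch of NewInfoFromAuthorityURI, implemented below; the tenant is a placeholder:

func parseAuthority() (authority.Info, error) {
	// validateAuthority=true mirrors the common public-cloud default.
	info, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/contoso.onmicrosoft.com", true)
	if err != nil {
		return authority.Info{}, err
	}
	// info.Tenant == "contoso.onmicrosoft.com", info.AuthorityType == AAD,
	// info.CanonicalAuthorityURI == "https://login.microsoftonline.com/contoso.onmicrosoft.com/"
	return info, nil
}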
+func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool) (Info, error) { + authorityURI = strings.ToLower(authorityURI) + var authorityType string + u, err := url.Parse(authorityURI) + if err != nil { + return Info{}, fmt.Errorf("authorityURI passed could not be parsed: %w", err) + } + if u.Scheme != "https" { + return Info{}, fmt.Errorf("authorityURI(%s) must have scheme https", authorityURI) + } + + tenant, err := firstPathSegment(u) + if tenant == "adfs" { + authorityType = ADFS + } else { + authorityType = AAD + } + + if err != nil { + return Info{}, err + } + + return Info{ + Host: u.Hostname(), + CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Hostname(), tenant), + AuthorityType: authorityType, + UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), + ValidateAuthority: validateAuthority, + Tenant: tenant, + }, nil +} + +// Endpoints consists of the endpoints from the tenant discovery response. +type Endpoints struct { + AuthorizationEndpoint string + TokenEndpoint string + selfSignedJwtAudience string + authorityHost string +} + +// NewEndpoints creates an Endpoints object. +func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints { + return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost} +} + +// UserRealmAccountType refers to the type of user realm. +type UserRealmAccountType string + +// These are the different types of user realms. +const ( + Unknown UserRealmAccountType = "" + Federated UserRealmAccountType = "Federated" + Managed UserRealmAccountType = "Managed" +) + +// UserRealm is used for the username password request to determine user type +type UserRealm struct { + AccountType UserRealmAccountType `json:"account_type"` + DomainName string `json:"domain_name"` + CloudInstanceName string `json:"cloud_instance_name"` + CloudAudienceURN string `json:"cloud_audience_urn"` + + // required if accountType is Federated + FederationProtocol string `json:"federation_protocol"` + FederationMetadataURL string `json:"federation_metadata_url"` + + AdditionalFields map[string]interface{} +} + +func (u UserRealm) validate() error { + switch "" { + case string(u.AccountType): + return errors.New("the account type (Federated or Managed) is missing") + case u.DomainName: + return errors.New("domain name of user realm is missing") + case u.CloudInstanceName: + return errors.New("cloud instance name of user realm is missing") + case u.CloudAudienceURN: + return errors.New("cloud Instance URN is missing") + } + + if u.AccountType == Federated { + switch "" { + case u.FederationProtocol: + return errors.New("federation protocol of user realm is missing") + case u.FederationMetadataURL: + return errors.New("federation metadata URL of user realm is missing") + } + } + return nil +} + +// Client represents the REST calls to authority backends. +type Client struct { + // Comm provides the HTTP transport client. 
+ Comm jsonCaller // *comm.Client +} + +func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) { + endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username)) + qv := url.Values{ + "api-version": []string{"1.0"}, + } + + resp := UserRealm{} + err := c.Comm.JSONCall( + ctx, + endpoint, + http.Header{"client-request-id": []string{authParams.CorrelationID}}, + qv, + nil, + &resp, + ) + if err != nil { + return resp, err + } + + return resp, resp.validate() +} + +func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) { + resp := TenantDiscoveryResponse{} + err := c.Comm.JSONCall( + ctx, + openIDConfigurationEndpoint, + http.Header{}, + nil, + nil, + &resp, + ) + + return resp, err +} + +func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) { + region := "" + var err error + resp := InstanceDiscoveryResponse{} + if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion { + region = authorityInfo.Region + } else if authorityInfo.Region == autoDetectRegion { + region = detectRegion(ctx) + } + if region != "" { + environment := authorityInfo.Host + switch environment { + case "login.microsoft.com", "login.windows.net", "sts.windows.net", defaultHost: + environment = "r." + defaultHost + } + resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant) + metadata := InstanceDiscoveryMetadata{ + PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host), + PreferredCache: authorityInfo.Host, + Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host}, + } + resp.Metadata = []InstanceDiscoveryMetadata{metadata} + } else { + qv := url.Values{} + qv.Set("api-version", "1.1") + qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant)) + + discoveryHost := defaultHost + if TrustedHost(authorityInfo.Host) { + discoveryHost = authorityInfo.Host + } + + endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) + } + return resp, err +} + +func detectRegion(ctx context.Context) string { + region := os.Getenv(regionName) + if region != "" { + region = strings.ReplaceAll(region, " ", "") + return strings.ToLower(region) + } + // HTTP call to IMDS endpoint to get region + // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev + // Set a 2 second timeout for this http client which only does calls to IMDS endpoint + client := http.Client{ + Timeout: time.Duration(2 * time.Second), + } + req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req.Header.Set("Metadata", "true") + resp, err := client.Do(req) + // If the request times out or there is an error, it is retried once + if err != nil || resp.StatusCode != 200 { + resp, err = client.Do(req) + if err != nil || resp.StatusCode != 200 { + return "" + } + } + defer resp.Body.Close() + response, err := io.ReadAll(resp.Body) + if err != nil { + return "" + } + return string(response) +} + +func (a *AuthParams) CacheKey(isAppCache bool) string { + if a.AuthorizationType == ATOnBehalfOf { + return a.AssertionHash() + } + if a.AuthorizationType == 
ATClientCredentials || isAppCache { + return a.AppKey() + } + if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID { + return a.HomeAccountID + } + return "" +} +func (a *AuthParams) AssertionHash() string { + hasher := sha256.New() + // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types + _, _ = hasher.Write([]byte(a.UserAssertion)) + sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + return sha +} + +func (a *AuthParams) AppKey() string { + if a.AuthorityInfo.Tenant != "" { + return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant) + } + return fmt.Sprintf("%s__AppTokenCache", a.ClientID) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go new file mode 100644 index 000000000000..10039773b067 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT. + +package authority + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ATUnknown-0] + _ = x[ATUsernamePassword-1] + _ = x[ATWindowsIntegrated-2] + _ = x[ATAuthCode-3] + _ = x[ATInteractive-4] + _ = x[ATClientCredentials-5] + _ = x[ATDeviceCode-6] + _ = x[ATRefreshToken-7] +} + +const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken" + +var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114} + +func (i AuthorizeType) String() string { + if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) { + return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go new file mode 100644 index 000000000000..7d9ec7cd3742 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package comm provides helpers for communicating with HTTP backends. +package comm + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" + "github.com/google/uuid" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient interface { + // Do sends an HTTP request and returns an HTTP response. + Do(req *http.Request) (*http.Response, error) + + // CloseIdleConnections closes any idle connections in a "keep-alive" state. 
+ CloseIdleConnections() +} + +// Client provides a wrapper to our *http.Client that handles compression and serialization needs. +type Client struct { + client HTTPClient +} + +// New returns a new Client object. +func New(httpClient HTTPClient) *Client { + if httpClient == nil { + panic("http.Client cannot == nil") + } + + return &Client{client: httpClient} +} + +// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion +// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON +// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called +// "AdditionalFields" we use a custom marshal/unmarshal engine. +func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error { + if qv == nil { + qv = url.Values{} + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute. + var marshal = json.Marshal + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + marshal = customJSON.Marshal + unmarshal = customJSON.Unmarshal + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + addStdHeaders(headers) + + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if body != nil { + // Note: In case your wondering why we are not gzip encoding.... + // I'm not sure if these various services support gzip on send. + headers.Add("Content-Type", "application/json; charset=utf-8") + data, err := marshal(body) + if err != nil { + return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(data)) + req.Method = http.MethodPost + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + } + } + return nil +} + +// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when +// sending application/xml . If sending XML via SOAP, use SOAPCall(). +func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error { + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but... + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, "", resp) +} + +// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into. 
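Editorial aside for readers of this vendored package (not part of the upstream file): JSONCall above switches to the module's custom JSON engine whenever the response struct carries an AdditionalFields member. A minimal in-package sketch of a caller; the endpoint, query values, and response type are hypothetical:

    type realmResponse struct {
        AccountType string `json:"account_type"`
        DomainName  string `json:"domain_name"`
        // Having AdditionalFields selects the custom marshal/unmarshal path.
        AdditionalFields map[string]interface{}
    }

    func fetchRealm(ctx context.Context, c *Client, upn string) (realmResponse, error) {
        resp := realmResponse{}
        qv := url.Values{"api-version": []string{"1.0"}}
        endpoint := "https://login.microsoftonline.com/common/UserRealm/" + url.PathEscape(upn)
        // A nil body keeps the request a GET; a non-nil body would switch it to POST.
        err := c.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
        return resp, err
    }
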
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error { + if body == "" { + return fmt.Errorf("cannot make a SOAP call with body set to empty string") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + if qv == nil { + qv = url.Values{} + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + u.RawQuery = qv.Encode() + + headers.Set("Content-Type", "application/soap+xml; charset=utf-8") + headers.Set("SOAPAction", action) + addStdHeaders(headers) + + return c.xmlCall(ctx, u, headers, body, resp) +} + +// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on +// an upper level call to set things such as SOAP parameters and Content-Type, if required. +func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error { + req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + + if len(body) > 0 { + req.Method = http.MethodPost + req.Body = io.NopCloser(strings.NewReader(body)) + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + return xml.Unmarshal(data, resp) +} + +// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data +// to the backend and receive JSON back. qv will be encoded into the request body. +func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error { + if len(qv) == 0 { + return fmt.Errorf("URLFormCall() requires qv to have non-zero length") + } + + if err := c.checkResp(reflect.ValueOf(resp)); err != nil { + return err + } + + u, err := url.Parse(endpoint) + if err != nil { + return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + } + + headers := http.Header{} + headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + addStdHeaders(headers) + + enc := qv.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: u, + Header: headers, + ContentLength: int64(len(enc)), + Body: io.NopCloser(strings.NewReader(enc)), + GetBody: func() (io.ReadCloser, error) { + return io.NopCloser(strings.NewReader(enc)), nil + }, + } + + data, err := c.do(ctx, req) + if err != nil { + return err + } + + v := reflect.ValueOf(resp) + if err := c.checkResp(v); err != nil { + return err + } + + var unmarshal = json.Unmarshal + if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok { + unmarshal = customJSON.Unmarshal + } + if resp != nil { + if err := unmarshal(data, resp); err != nil { + return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + } + } + return nil +} + +// do makes the HTTP call to the server and returns the contents of the body. 
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) { + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + } + req = req.WithContext(ctx) + + reply, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("server response error:\n %w", err) + } + defer reply.Body.Close() + + data, err := c.readBody(reply) + if err != nil { + return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err) + } + reply.Body = io.NopCloser(bytes.NewBuffer(data)) + + // NOTE: This doesn't happen immediately after the call so that we can get an error message + // from the server and include it in our error. + switch reply.StatusCode { + case 200, 201: + default: + sd := strings.TrimSpace(string(data)) + if sd != "" { + // We probably have the error in the body. + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd), + } + } + return nil, errors.CallErr{ + Req: req, + Resp: reply, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode), + } + } + + return data, nil +} + +// checkResp checks a response object o make sure it is a pointer to a struct. +func (c *Client) checkResp(v reflect.Value) error { + if v.Kind() != reflect.Ptr { + return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface()) + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface()) + } + return nil +} + +// readBody reads the body out of an *http.Response. It supports gzip encoded responses. +func (c *Client) readBody(resp *http.Response) ([]byte, error) { + var reader io.Reader = resp.Body + switch resp.Header.Get("Content-Encoding") { + case "": + // Do nothing + case "gzip": + reader = gzipDecompress(resp.Body) + default: + return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding")) + } + return io.ReadAll(reader) +} + +var testID string + +// addStdHeaders adds the standard headers we use on all calls. +func addStdHeaders(headers http.Header) http.Header { + headers.Set("Accept-Encoding", "gzip") + // So that I can have a static id for tests. + if testID != "" { + headers.Set("client-request-id", testID) + headers.Set("Return-Client-Request-Id", "false") + } else { + headers.Set("client-request-id", uuid.New().String()) + headers.Set("Return-Client-Request-Id", "false") + } + headers.Set("x-client-sku", "MSAL.Go") + headers.Set("x-client-os", runtime.GOOS) + headers.Set("x-client-cpu", runtime.GOARCH) + headers.Set("x-client-ver", version.Version) + return headers +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go new file mode 100644 index 000000000000..4d3dbfcf0a6b --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
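One caller-facing detail of do() above worth noting: the 30-second timeout is only installed when the incoming context carries no deadline, so callers control the real budget. A hedged, caller-side sketch (the endpoint and client construction are placeholders):

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel() // do() honors this deadline instead of imposing its 30s default
    c := New(&http.Client{})
    out := struct{ AdditionalFields map[string]interface{} }{}
    err := c.JSONCall(ctx, "https://example.invalid/metadata", http.Header{}, nil, nil, &out)
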
+ +package comm + +import ( + "compress/gzip" + "io" +) + +func gzipDecompress(r io.Reader) io.Reader { + gzipReader, _ := gzip.NewReader(r) + + pipeOut, pipeIn := io.Pipe() + go func() { + // decompression bomb would have to come from Azure services. + // If we want to limit, we should do that in comm.do(). + _, err := io.Copy(pipeIn, gzipReader) //nolint + if err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + gzipReader.Close() + return + } + if err := gzipReader.Close(); err != nil { + // don't need the error. + pipeIn.CloseWithError(err) //nolint + return + } + pipeIn.Close() + }() + return pipeOut +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go new file mode 100644 index 000000000000..b628f61ac081 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package grant holds types of grants issued by authorization services. +package grant + +const ( + Password = "password" + JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer" + SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer" + SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer" + DeviceCode = "device_code" + AuthCode = "authorization_code" + RefreshToken = "refresh_token" + ClientCredential = "client_credentials" + ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" +) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go new file mode 100644 index 000000000000..1f9c543fa3b2 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package ops provides operations to various backend services using REST clients. + +The REST type provides several clients that can be used to communicate to backends. +Usage is simple: + + rest := ops.New() + + // Creates an authority client and calls the UserRealm() method. + userRealm, err := rest.Authority().UserRealm(ctx, authParameters) + if err != nil { + // Do something + } +*/ +package ops + +import ( + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" +) + +// HTTPClient represents an HTTP client. +// It's usually an *http.Client from the standard library. +type HTTPClient = comm.HTTPClient + +// REST provides REST clients for communicating with various backends used by MSAL. +type REST struct { + client *comm.Client +} + +// New is the constructor for REST. +func New(httpClient HTTPClient) *REST { + return &REST{client: comm.New(httpClient)} +} + +// Authority returns a client for querying information about various authorities. 
+func (r *REST) Authority() authority.Client { + return authority.Client{Comm: r.client} +} + +// AccessTokens returns a client that can be used to get various access tokens for +// authorization purposes. +func (r *REST) AccessTokens() accesstokens.Client { + return accesstokens.Client{Comm: r.client} +} + +// WSTrust provides access to various metadata in a WSTrust service. This data can +// be used to gain tokens based on SAML data using the client provided by AccessTokens(). +func (r *REST) WSTrust() wstrust.Client { + return wstrust.Client{Comm: r.client} +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go new file mode 100644 index 000000000000..a2bb6278ae5f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=endpointType"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[etUnknown-0] + _ = x[etUsernamePassword-1] + _ = x[etWindowsTransport-2] +} + +const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport" + +var _endpointType_index = [...]uint8{0, 9, 27, 45} + +func (i endpointType) String() string { + if i < 0 || i >= endpointType(len(_endpointType_index)-1) { + return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go new file mode 100644 index 000000000000..6497270028d8 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go @@ -0,0 +1,394 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import "encoding/xml" + +type Definitions struct { + XMLName xml.Name `xml:"definitions"` + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + TargetNamespace string `xml:"targetNamespace,attr"` + WSDL string `xml:"wsdl,attr"` + XSD string `xml:"xsd,attr"` + T string `xml:"t,attr"` + SOAPENC string `xml:"soapenc,attr"` + SOAP string `xml:"soap,attr"` + TNS string `xml:"tns,attr"` + MSC string `xml:"msc,attr"` + WSAM string `xml:"wsam,attr"` + SOAP12 string `xml:"soap12,attr"` + WSA10 string `xml:"wsa10,attr"` + WSA string `xml:"wsa,attr"` + WSAW string `xml:"wsaw,attr"` + WSX string `xml:"wsx,attr"` + WSAP string `xml:"wsap,attr"` + WSU string `xml:"wsu,attr"` + Trust string `xml:"trust,attr"` + WSP string `xml:"wsp,attr"` + Policy []Policy `xml:"Policy"` + Types Types `xml:"types"` + Message []Message `xml:"message"` + PortType []PortType `xml:"portType"` + Binding []Binding `xml:"binding"` + Service Service `xml:"service"` +} + +type Policy struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + ExactlyOne ExactlyOne `xml:"ExactlyOne"` +} + +type ExactlyOne struct { + Text string `xml:",chardata"` + All All `xml:"All"` +} + +type All struct { + Text string `xml:",chardata"` + NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"` + TransportBinding TransportBinding `xml:"TransportBinding"` + UsingAddressing Text `xml:"UsingAddressing"` + EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"` + WSS11 WSS11 `xml:"Wss11"` + Trust10 Trust10 `xml:"Trust10"` + SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"` + Trust13 WSTrust13 `xml:"Trust13"` + SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"` +} + +type NegotiateAuthentication struct { + Text string `xml:",chardata"` + HTTP string `xml:"http,attr"` + XMLName xml.Name +} + +type TransportBinding struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy TransportBindingPolicy `xml:"Policy"` +} + +type TransportBindingPolicy struct { + Text string `xml:",chardata"` + TransportToken TransportToken `xml:"TransportToken"` + AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"` + Layout Layout `xml:"Layout"` + IncludeTimestamp Text `xml:"IncludeTimestamp"` +} + +type TransportToken struct { + Text string `xml:",chardata"` + Policy TransportTokenPolicy `xml:"Policy"` +} + +type TransportTokenPolicy struct { + Text string `xml:",chardata"` + HTTPSToken HTTPSToken `xml:"HttpsToken"` +} + +type HTTPSToken struct { + Text string `xml:",chardata"` + RequireClientCertificate string `xml:"RequireClientCertificate,attr"` +} + +type AlgorithmSuite struct { + Text string `xml:",chardata"` + Policy AlgorithmSuitePolicy `xml:"Policy"` +} + +type AlgorithmSuitePolicy struct { + Text string `xml:",chardata"` + Basic256 Text `xml:"Basic256"` + Basic128 Text `xml:"Basic128"` +} + +type Layout struct { + Text string `xml:",chardata"` + Policy LayoutPolicy `xml:"Policy"` +} + +type LayoutPolicy struct { + Text string `xml:",chardata"` + Strict Text `xml:"Strict"` +} + +type EndorsingSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy EndorsingSupportingTokensPolicy `xml:"Policy"` +} + +type EndorsingSupportingTokensPolicy struct { + Text string `xml:",chardata"` + X509Token X509Token `xml:"X509Token"` + RSAToken RSAToken `xml:"RsaToken"` + SignedParts SignedParts `xml:"SignedParts"` + KerberosToken KerberosToken 
`xml:"KerberosToken"` + IssuedToken IssuedToken `xml:"IssuedToken"` + KeyValueToken KeyValueToken `xml:"KeyValueToken"` +} + +type X509Token struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy X509TokenPolicy `xml:"Policy"` +} + +type X509TokenPolicy struct { + Text string `xml:",chardata"` + RequireThumbprintReference Text `xml:"RequireThumbprintReference"` + WSSX509V3Token10 Text `xml:"WssX509V3Token10"` +} + +type RSAToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` + MSSP string `xml:"mssp,attr"` +} + +type SignedParts struct { + Text string `xml:",chardata"` + Header SignedPartsHeader `xml:"Header"` +} + +type SignedPartsHeader struct { + Text string `xml:",chardata"` + Name string `xml:"Name,attr"` + Namespace string `xml:"Namespace,attr"` +} + +type KerberosToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy KerberosTokenPolicy `xml:"Policy"` +} + +type KerberosTokenPolicy struct { + Text string `xml:",chardata"` + WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"` +} + +type IssuedToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"` + Policy IssuedTokenPolicy `xml:"Policy"` +} + +type RequestSecurityTokenTemplate struct { + Text string `xml:",chardata"` + KeyType Text `xml:"KeyType"` + EncryptWith Text `xml:"EncryptWith"` + SignatureAlgorithm Text `xml:"SignatureAlgorithm"` + CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"` + EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"` + KeySize Text `xml:"KeySize"` + KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"` +} + +type IssuedTokenPolicy struct { + Text string `xml:",chardata"` + RequireInternalReference Text `xml:"RequireInternalReference"` +} + +type KeyValueToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Optional string `xml:"Optional,attr"` +} + +type WSS11 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Wss11Policy `xml:"Policy"` +} + +type Wss11Policy struct { + Text string `xml:",chardata"` + MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"` +} + +type Trust10 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy Trust10Policy `xml:"Policy"` +} + +type Trust10Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type SupportingTokensPolicy struct { + Text string `xml:",chardata"` + UsernameToken UsernameToken `xml:"UsernameToken"` +} +type UsernameToken struct { + Text string `xml:",chardata"` + IncludeToken string `xml:"IncludeToken,attr"` + Policy UsernameTokenPolicy `xml:"Policy"` +} + +type UsernameTokenPolicy struct { + Text string `xml:",chardata"` + WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"` +} + +type WSSUsernameToken10 struct { + Text string `xml:",chardata"` + XMLName xml.Name +} + +type WSTrust13 struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy WSTrust13Policy `xml:"Policy"` +} + 
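These declarations exist purely so encoding/xml can map a WS-Trust MEX (WSDL) document onto Go values. A small sketch, not part of the vendored file, of how the tree above is typically populated; mexXML stands in for the raw federation-metadata document fetched over HTTP:

    // parseMex decodes a federation metadata (MEX) document into the Definitions tree.
    func parseMex(mexXML []byte) (Definitions, error) {
        doc := Definitions{}
        if err := xml.Unmarshal(mexXML, &doc); err != nil {
            return Definitions{}, err
        }
        return doc, nil
    }

Each decoded binding then exposes the SOAP action (b.Operation.Operation.SoapAction) that is later used to distinguish Trust13 from Trust2005 endpoints.
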
+type WSTrust13Policy struct { + Text string `xml:",chardata"` + MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"` + RequireClientEntropy Text `xml:"RequireClientEntropy"` + RequireServerEntropy Text `xml:"RequireServerEntropy"` +} + +type SignedEncryptedSupportingTokens struct { + Text string `xml:",chardata"` + SP string `xml:"sp,attr"` + Policy SupportingTokensPolicy `xml:"Policy"` +} + +type Types struct { + Text string `xml:",chardata"` + Schema Schema `xml:"schema"` +} + +type Schema struct { + Text string `xml:",chardata"` + TargetNamespace string `xml:"targetNamespace,attr"` + Import []Import `xml:"import"` +} + +type Import struct { + Text string `xml:",chardata"` + SchemaLocation string `xml:"schemaLocation,attr"` + Namespace string `xml:"namespace,attr"` +} + +type Message struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Part Part `xml:"part"` +} + +type Part struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Element string `xml:"element,attr"` +} + +type PortType struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation Operation `xml:"operation"` +} + +type Operation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Input OperationIO `xml:"input"` + Output OperationIO `xml:"output"` +} + +type OperationIO struct { + Text string `xml:",chardata"` + Action string `xml:"Action,attr"` + Message string `xml:"message,attr"` + Body OperationIOBody `xml:"body"` +} + +type OperationIOBody struct { + Text string `xml:",chardata"` + Use string `xml:"use,attr"` +} + +type Binding struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + PolicyReference PolicyReference `xml:"PolicyReference"` + Binding DefinitionsBinding `xml:"binding"` + Operation BindingOperation `xml:"operation"` +} + +type PolicyReference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` +} + +type DefinitionsBinding struct { + Text string `xml:",chardata"` + Transport string `xml:"transport,attr"` +} + +type BindingOperation struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Operation BindingOperationOperation `xml:"operation"` + Input BindingOperationIO `xml:"input"` + Output BindingOperationIO `xml:"output"` +} + +type BindingOperationOperation struct { + Text string `xml:",chardata"` + SoapAction string `xml:"soapAction,attr"` + Style string `xml:"style,attr"` +} + +type BindingOperationIO struct { + Text string `xml:",chardata"` + Body OperationIOBody `xml:"body"` +} + +type Service struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Port []Port `xml:"port"` +} + +type Port struct { + Text string `xml:",chardata"` + Name string `xml:"name,attr"` + Binding string `xml:"binding,attr"` + Address Address `xml:"address"` + EndpointReference PortEndpointReference `xml:"EndpointReference"` +} + +type Address struct { + Text string `xml:",chardata"` + Location string `xml:"location,attr"` +} + +type PortEndpointReference struct { + Text string `xml:",chardata"` + Address Text `xml:"Address"` + Identity Identity `xml:"Identity"` +} + +type Identity struct { + Text string `xml:",chardata"` + XMLNS string `xml:"xmlns,attr"` + SPN Text `xml:"Spn"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go new file mode 100644 index 000000000000..7d0725565777 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go @@ -0,0 +1,230 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import "encoding/xml" + +// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or +// at the least put a link to RFC. + +type SAMLDefinitions struct { + XMLName xml.Name `xml:"Envelope"` + Text string `xml:",chardata"` + S string `xml:"s,attr"` + A string `xml:"a,attr"` + U string `xml:"u,attr"` + Header Header `xml:"Header"` + Body Body `xml:"Body"` +} + +type Header struct { + Text string `xml:",chardata"` + Action Action `xml:"Action"` + Security Security `xml:"Security"` +} + +type Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` +} + +type Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"mustUnderstand,attr"` + O string `xml:"o,attr"` + Timestamp Timestamp `xml:"Timestamp"` +} + +type Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"Id,attr"` + Created Text `xml:"Created"` + Expires Text `xml:"Expires"` +} + +type Text struct { + Text string `xml:",chardata"` +} + +type Body struct { + Text string `xml:",chardata"` + RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"` +} + +type RequestSecurityTokenResponseCollection struct { + Text string `xml:",chardata"` + Trust string `xml:"trust,attr"` + RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"` +} + +type RequestSecurityTokenResponse struct { + Text string `xml:",chardata"` + Lifetime Lifetime `xml:"Lifetime"` + AppliesTo AppliesTo `xml:"AppliesTo"` + RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"` + RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"` + RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"` + TokenType Text `xml:"TokenType"` + RequestType Text `xml:"RequestType"` + KeyType Text `xml:"KeyType"` +} + +type Lifetime struct { + Text string `xml:",chardata"` + Created WSUTimestamp `xml:"Created"` + Expires WSUTimestamp `xml:"Expires"` +} + +type WSUTimestamp struct { + Text string `xml:",chardata"` + Wsu string `xml:"wsu,attr"` +} + +type AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"wsp,attr"` + EndpointReference EndpointReference `xml:"EndpointReference"` +} + +type EndpointReference struct { + Text string `xml:",chardata"` + Wsa string `xml:"wsa,attr"` + Address Text `xml:"Address"` +} + +type RequestedSecurityToken struct { + Text string `xml:",chardata"` + AssertionRawXML string `xml:",innerxml"` + Assertion Assertion `xml:"Assertion"` +} + +type Assertion struct { + XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns + Text string `xml:",chardata"` + MajorVersion string `xml:"MajorVersion,attr"` + MinorVersion string `xml:"MinorVersion,attr"` + AssertionID string `xml:"AssertionID,attr"` + Issuer string `xml:"Issuer,attr"` + IssueInstant string `xml:"IssueInstant,attr"` + Saml string `xml:"saml,attr"` + Conditions Conditions `xml:"Conditions"` + 
AttributeStatement AttributeStatement `xml:"AttributeStatement"` + AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"` + Signature Signature `xml:"Signature"` +} + +type Conditions struct { + Text string `xml:",chardata"` + NotBefore string `xml:"NotBefore,attr"` + NotOnOrAfter string `xml:"NotOnOrAfter,attr"` + AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"` +} + +type AudienceRestrictionCondition struct { + Text string `xml:",chardata"` + Audience Text `xml:"Audience"` +} + +type AttributeStatement struct { + Text string `xml:",chardata"` + Subject Subject `xml:"Subject"` + Attribute []Attribute `xml:"Attribute"` +} + +type Subject struct { + Text string `xml:",chardata"` + NameIdentifier NameIdentifier `xml:"NameIdentifier"` + SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"` +} + +type NameIdentifier struct { + Text string `xml:",chardata"` + Format string `xml:"Format,attr"` +} + +type SubjectConfirmation struct { + Text string `xml:",chardata"` + ConfirmationMethod Text `xml:"ConfirmationMethod"` +} + +type Attribute struct { + Text string `xml:",chardata"` + AttributeName string `xml:"AttributeName,attr"` + AttributeNamespace string `xml:"AttributeNamespace,attr"` + AttributeValue Text `xml:"AttributeValue"` +} + +type AuthenticationStatement struct { + Text string `xml:",chardata"` + AuthenticationMethod string `xml:"AuthenticationMethod,attr"` + AuthenticationInstant string `xml:"AuthenticationInstant,attr"` + Subject Subject `xml:"Subject"` +} + +type Signature struct { + Text string `xml:",chardata"` + Ds string `xml:"ds,attr"` + SignedInfo SignedInfo `xml:"SignedInfo"` + SignatureValue Text `xml:"SignatureValue"` + KeyInfo KeyInfo `xml:"KeyInfo"` +} + +type SignedInfo struct { + Text string `xml:",chardata"` + CanonicalizationMethod Method `xml:"CanonicalizationMethod"` + SignatureMethod Method `xml:"SignatureMethod"` + Reference Reference `xml:"Reference"` +} + +type Method struct { + Text string `xml:",chardata"` + Algorithm string `xml:"Algorithm,attr"` +} + +type Reference struct { + Text string `xml:",chardata"` + URI string `xml:"URI,attr"` + Transforms Transforms `xml:"Transforms"` + DigestMethod Method `xml:"DigestMethod"` + DigestValue Text `xml:"DigestValue"` +} + +type Transforms struct { + Text string `xml:",chardata"` + Transform []Method `xml:"Transform"` +} + +type KeyInfo struct { + Text string `xml:",chardata"` + Xmlns string `xml:"xmlns,attr"` + X509Data X509Data `xml:"X509Data"` +} + +type X509Data struct { + Text string `xml:",chardata"` + X509Certificate Text `xml:"X509Certificate"` +} + +type RequestedAttachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} + +type SecurityTokenReference struct { + Text string `xml:",chardata"` + TokenType string `xml:"TokenType,attr"` + O string `xml:"o,attr"` + K string `xml:"k,attr"` + KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"` +} + +type KeyIdentifier struct { + Text string `xml:",chardata"` + ValueType string `xml:"ValueType,attr"` +} + +type RequestedUnattachedReference struct { + Text string `xml:",chardata"` + SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"` +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go new file mode 100644 index 000000000000..6fe5efa8a9ab --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=Version"; DO NOT EDIT. + +package defs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TrustUnknown-0] + _ = x[Trust2005-1] + _ = x[Trust13-2] +} + +const _Version_name = "TrustUnknownTrust2005Trust13" + +var _Version_index = [...]uint8{0, 12, 21, 28} + +func (i Version) String() string { + if i < 0 || i >= Version(len(_Version_index)-1) { + return "Version(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Version_name[_Version_index[i]:_Version_index[i+1]] +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go new file mode 100644 index 000000000000..8fad5efb5de5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package defs + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + uuid "github.com/google/uuid" +) + +//go:generate stringer -type=Version + +type Version int + +const ( + TrustUnknown Version = iota + Trust2005 + Trust13 +) + +// Endpoint represents a WSTrust endpoint. +type Endpoint struct { + // Version is the version of the endpoint. + Version Version + // URL is the URL of the endpoint. 
+ URL string +} + +type wsTrustTokenRequestEnvelope struct { + XMLName xml.Name `xml:"s:Envelope"` + Text string `xml:",chardata"` + S string `xml:"xmlns:s,attr"` + Wsa string `xml:"xmlns:wsa,attr"` + Wsu string `xml:"xmlns:wsu,attr"` + Header struct { + Text string `xml:",chardata"` + Action struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:Action"` + MessageID struct { + Text string `xml:",chardata"` + } `xml:"wsa:messageID"` + ReplyTo struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:ReplyTo"` + To struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + } `xml:"wsa:To"` + Security struct { + Text string `xml:",chardata"` + MustUnderstand string `xml:"s:mustUnderstand,attr"` + Wsse string `xml:"xmlns:wsse,attr"` + Timestamp struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Created struct { + Text string `xml:",chardata"` + } `xml:"wsu:Created"` + Expires struct { + Text string `xml:",chardata"` + } `xml:"wsu:Expires"` + } `xml:"wsu:Timestamp"` + UsernameToken struct { + Text string `xml:",chardata"` + ID string `xml:"wsu:Id,attr"` + Username struct { + Text string `xml:",chardata"` + } `xml:"wsse:Username"` + Password struct { + Text string `xml:",chardata"` + } `xml:"wsse:Password"` + } `xml:"wsse:UsernameToken"` + } `xml:"wsse:Security"` + } `xml:"s:Header"` + Body struct { + Text string `xml:",chardata"` + RequestSecurityToken struct { + Text string `xml:",chardata"` + Wst string `xml:"xmlns:wst,attr"` + AppliesTo struct { + Text string `xml:",chardata"` + Wsp string `xml:"xmlns:wsp,attr"` + EndpointReference struct { + Text string `xml:",chardata"` + Address struct { + Text string `xml:",chardata"` + } `xml:"wsa:Address"` + } `xml:"wsa:EndpointReference"` + } `xml:"wsp:AppliesTo"` + KeyType struct { + Text string `xml:",chardata"` + } `xml:"wst:KeyType"` + RequestType struct { + Text string `xml:",chardata"` + } `xml:"wst:RequestType"` + } `xml:"wst:RequestSecurityToken"` + } `xml:"s:Body"` +} + +func buildTimeString(t time.Time) string { + // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format + return t.Format("2006-01-02T15:04:05.000Z") +} + +func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) { + var soapAction string + var trustNamespace string + var keyType string + var requestType string + + createdTime := time.Now().UTC() + expiresTime := createdTime.Add(10 * time.Minute) + + switch wte.Version { + case Trust2005: + soapAction = trust2005Spec + trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust" + keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey" + requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue" + case Trust13: + soapAction = trust13Spec + trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512" + keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer" + requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue" + default: + return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version) + } + + var envelope wsTrustTokenRequestEnvelope + + messageUUID := uuid.New() + + envelope.S = "http://www.w3.org/2003/05/soap-envelope" + envelope.Wsa = 
"http://www.w3.org/2005/08/addressing" + envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd" + + envelope.Header.Action.MustUnderstand = "1" + envelope.Header.Action.Text = soapAction + envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String() + envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous" + envelope.Header.To.MustUnderstand = "1" + envelope.Header.To.Text = wte.URL + + switch authType { + case authority.ATUnknown: + return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType) + case authority.ATUsernamePassword: + endpointUUID := uuid.New() + + var trustID string + if wte.Version == Trust2005 { + trustID = "UnPwSecTok2005-" + endpointUUID.String() + } else { + trustID = "UnPwSecTok13-" + endpointUUID.String() + } + + envelope.Header.Security.MustUnderstand = "1" + envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd" + envelope.Header.Security.Timestamp.ID = "MSATimeStamp" + envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime) + envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime) + envelope.Header.Security.UsernameToken.ID = trustID + envelope.Header.Security.UsernameToken.Username.Text = username + envelope.Header.Security.UsernameToken.Password.Text = password + default: + // This is just to note that we don't do anything for other cases. + // We aren't missing anything I know of. + } + + envelope.Body.RequestSecurityToken.Wst = trustNamespace + envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy" + envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN + envelope.Body.RequestSecurityToken.KeyType.Text = keyType + envelope.Body.RequestSecurityToken.RequestType.Text = requestType + + output, err := xml.Marshal(envelope) + if err != nil { + return "", err + } + + return string(output), nil +} + +func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "") +} + +func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) { + return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go new file mode 100644 index 000000000000..e3d19886ebc5 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +package defs + +import ( + "errors" + "fmt" + "strings" +) + +//go:generate stringer -type=endpointType + +type endpointType int + +const ( + etUnknown endpointType = iota + etUsernamePassword + etWindowsTransport +) + +type wsEndpointData struct { + Version Version + EndpointType endpointType +} + +const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" +const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" + +type MexDocument struct { + UsernamePasswordEndpoint Endpoint + WindowsTransportEndpoint Endpoint + policies map[string]endpointType + bindings map[string]wsEndpointData +} + +func updateEndpoint(cached *Endpoint, found Endpoint) { + if cached == nil || cached.Version == TrustUnknown { + *cached = found + return + } + if (*cached).Version == Trust2005 && found.Version == Trust13 { + *cached = found + return + } +} + +// TODO(msal): Someone needs to write tests for everything below. + +// NewFromDef creates a new MexDocument. +func NewFromDef(defs Definitions) (MexDocument, error) { + policies, err := policies(defs) + if err != nil { + return MexDocument{}, err + } + + bindings, err := bindings(defs, policies) + if err != nil { + return MexDocument{}, err + } + + userPass, windows, err := endpoints(defs, bindings) + if err != nil { + return MexDocument{}, err + } + + return MexDocument{ + UsernamePasswordEndpoint: userPass, + WindowsTransportEndpoint: windows, + policies: policies, + bindings: bindings, + }, nil +} + +func policies(defs Definitions) (map[string]endpointType, error) { + policies := make(map[string]endpointType, len(defs.Policy)) + + for _, policy := range defs.Policy { + if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etWindowsTransport + } + } + + if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" { + if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" { + policies["#"+policy.ID] = etUsernamePassword + } + } + } + + if len(policies) == 0 { + return policies, errors.New("no policies for mex document") + } + + return policies, nil +} + +func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) { + bindings := make(map[string]wsEndpointData, len(defs.Binding)) + + for _, binding := range defs.Binding { + policyName := binding.PolicyReference.URI + transport := binding.Binding.Transport + + if transport == "http://schemas.xmlsoap.org/soap/http" { + if policy, ok := policies[policyName]; ok { + bindingName := binding.Name + specVersion := binding.Operation.Operation.SoapAction + + if specVersion == trust13Spec { + bindings[bindingName] = wsEndpointData{Trust13, policy} + } else if specVersion == trust2005Spec { + bindings[bindingName] = wsEndpointData{Trust2005, policy} + } else { + return nil, errors.New("found unknown spec version in mex document") + } + } + } + } + return bindings, nil +} + +func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) { + for _, port := range defs.Service.Port { + bindingName := port.Binding + + index := 
strings.Index(bindingName, ":") + if index != -1 { + bindingName = bindingName[index+1:] + } + + if binding, ok := bindings[bindingName]; ok { + url := strings.TrimSpace(port.EndpointReference.Address.Text) + if url == "" { + return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint") + } + if binding.Version == TrustUnknown { + return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown") + } + endpoint := Endpoint{Version: binding.Version, URL: url} + + switch binding.EndpointType { + case etUsernamePassword: + updateEndpoint(&userPass, endpoint) + case etWindowsTransport: + updateEndpoint(&windows, endpoint) + default: + return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document") + } + } + } + return userPass, windows, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go new file mode 100644 index 000000000000..47cd4c692d62 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.) +for the purposes of extracting metadata from the service. This data can be used to acquire +tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call. +*/ +package wstrust + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" +) + +type xmlCaller interface { + XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error + SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error +} + +type SamlTokenInfo struct { + AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant. + Assertion string +} + +// Client represents the REST calls to get tokens from token generator backends. +type Client struct { + // Comm provides the HTTP transport client. + Comm xmlCaller +} + +// TODO(msal): This allows me to call Mex without having a real Def file on line 45. +// This would fail because policies() would not find a policy. This is easy enough to +// fix in test data, but.... Definitions is defined with built in structs. That needs +// to be pulled apart and until then I have this hack in. +var newFromDef = defs.NewFromDef + +// Mex provides metadata about a wstrust service. +func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) { + resp := defs.Definitions{} + err := c.Comm.XMLCall( + ctx, + federationMetadataURL, + http.Header{}, + nil, + &resp, + ) + if err != nil { + return defs.MexDocument{}, err + } + + return newFromDef(resp) +} + +const ( + SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue" + + // Note: Commented out because this action is not supported. 
It was in the original code + // but only used in a switch where it errored. Since there was only one value, a default + // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually + // sure what's going on here. It like we have half support. For now this is here just + // for documentation purposes in case we are going to add support. + // + // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue" +) + +// SAMLTokenInfo provides SAML information that is used to generate a SAML token. +func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) { + var wsTrustRequestMessage string + var err error + + switch authParameters.AuthorizationType { + case authority.ATWindowsIntegrated: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN) + if err != nil { + return SamlTokenInfo{}, err + } + case authority.ATUsernamePassword: + wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword( + cloudAudienceURN, authParameters.Username, authParameters.Password) + if err != nil { + return SamlTokenInfo{}, err + } + default: + return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType) + } + + var soapAction string + switch endpoint.Version { + case defs.Trust13: + soapAction = SoapActionDefault + case defs.Trust2005: + return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented") + default: + return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version) + } + + resp := defs.SAMLDefinitions{} + err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp) + if err != nil { + return SamlTokenInfo{}, err + } + + return c.samlAssertion(resp) +} + +const ( + samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion" + samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion" +) + +func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) { + for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse { + token := tokenResponse.RequestedSecurityToken + if token.Assertion.XMLName.Local != "" { + assertion := token.AssertionRawXML + + samlVersion := token.Assertion.Saml + switch samlVersion { + case samlv1Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil + case samlv2Assertion: + return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil + } + return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion) + } + } + return SamlTokenInfo{}, errors.New("unknown WS-Trust version") +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go new file mode 100644 index 000000000000..893ef4814f78 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -0,0 +1,152 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too +// tired at this point to do it. It, like many other *Manager code I found was broken because +// they didn't have mutex protection. 
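The resolver that follows guards its endpoint cache with a sync.Mutex, which is the fix the comment above alludes to. A stripped-down sketch of that read-through pattern; the names here are simplified stand-ins rather than the actual types below:

    type endpointCache struct {
        mu    sync.Mutex
        cache map[string]authority.Endpoints
    }

    func (c *endpointCache) get(key string) (authority.Endpoints, bool) {
        c.mu.Lock()
        defer c.mu.Unlock()
        ep, ok := c.cache[key]
        return ep, ok
    }

    func (c *endpointCache) put(key string, ep authority.Endpoints) {
        c.mu.Lock()
        defer c.mu.Unlock()
        c.cache[key] = ep
    }
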
+ +package oauth + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" +) + +// ADFS is an active directory federation service authority type. +const ADFS = "ADFS" + +type cacheEntry struct { + Endpoints authority.Endpoints + ValidForDomainsInList map[string]bool +} + +func createcacheEntry(endpoints authority.Endpoints) cacheEntry { + return cacheEntry{endpoints, map[string]bool{}} +} + +// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition. +type authorityEndpoint struct { + rest *ops.REST + + mu sync.Mutex + cache map[string]cacheEntry +} + +// newAuthorityEndpoint is the constructor for AuthorityEndpoint. +func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint { + m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}} + return m +} + +// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance +func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) { + if authorityInfo.AuthorityType == ADFS && len(userPrincipalName) == 0 { + return authority.Endpoints{}, errors.New("UPN required for authority validation for ADFS") + } + + if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found { + return endpoints, nil + } + + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + if err != nil { + return authority.Endpoints{}, err + } + + resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint) + if err != nil { + return authority.Endpoints{}, err + } + if err := resp.Validate(); err != nil { + return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err) + } + + tenant := authorityInfo.Tenant + + endpoints := authority.NewEndpoints( + strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1), + strings.Replace(resp.Issuer, "{tenant}", tenant, -1), + authorityInfo.Host) + + m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints) + + return endpoints, nil +} + +// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false. +func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + if authorityInfo.AuthorityType == ADFS { + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { + return cacheEntry.Endpoints, true + } + } + } + return cacheEntry.Endpoints, true + } + return authority.Endpoints{}, false +} + +func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) { + m.mu.Lock() + defer m.mu.Unlock() + + updatedCacheEntry := createcacheEntry(endpoints) + + if authorityInfo.AuthorityType == ADFS { + // Since we're here, we've made a call to the backend. We want to ensure we're caching + // the latest values from the server. 
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { + for k := range cacheEntry.ValidForDomainsInList { + updatedCacheEntry.ValidForDomainsInList[k] = true + } + } + domain, err := adfsDomainFromUpn(userPrincipalName) + if err == nil { + updatedCacheEntry.ValidForDomainsInList[domain] = true + } + } + + m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry +} + +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { + if authorityInfo.Tenant == "adfs" { + return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + } else if authorityInfo.Region != "" { + resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) + if err != nil { + return "", err + } + return resp.TenantDiscoveryEndpoint, nil + + } + + return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil +} + +func adfsDomainFromUpn(userPrincipalName string) (string, error) { + parts := strings.Split(userPrincipalName, "@") + if len(parts) < 2 { + return "", errors.New("no @ present in user principal name") + } + return parts[1], nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go new file mode 100644 index 000000000000..f7e12a71bf31 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package shared + +import ( + "net/http" + "reflect" + "strings" +) + +const ( + // CacheKeySeparator is used in creating the keys of the cache. + CacheKeySeparator = "-" +) + +type Account struct { + HomeAccountID string `json:"home_account_id,omitempty"` + Environment string `json:"environment,omitempty"` + Realm string `json:"realm,omitempty"` + LocalAccountID string `json:"local_account_id,omitempty"` + AuthorityType string `json:"authority_type,omitempty"` + PreferredUsername string `json:"username,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + MiddleName string `json:"middle_name,omitempty"` + Name string `json:"name,omitempty"` + AlternativeID string `json:"alternative_account_id,omitempty"` + RawClientInfo string `json:"client_info,omitempty"` + UserAssertionHash string `json:"user_assertion_hash,omitempty"` + + AdditionalFields map[string]interface{} +} + +// NewAccount creates an account. +func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account { + return Account{ + HomeAccountID: homeAccountID, + Environment: env, + Realm: realm, + LocalAccountID: localAccountID, + AuthorityType: authorityType, + PreferredUsername: username, + } +} + +// Key creates the key for storing accounts in the cache. +func (acc Account) Key() string { + return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) +} + +// IsZero checks the zero value of account. 
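Editorial note (not part of the vendored file): Key() above joins the three identifying fields with CacheKeySeparator, so a hypothetical account is keyed as shown below; the field values are placeholders:

    acct := NewAccount("uid.utid", "login.microsoftonline.com", "contoso.onmicrosoft.com", "uid", "MSSTS", "user@contoso.com")
    fmt.Println(acct.Key())
    // Output: uid.utid-login.microsoftonline.com-contoso.onmicrosoft.com
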
+func (acc Account) IsZero() bool { + v := reflect.ValueOf(acc) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + if !field.IsZero() { + switch field.Kind() { + case reflect.Map, reflect.Slice: + if field.Len() == 0 { + continue + } + } + return false + } + } + return true +} + +// DefaultClient is our default shared HTTP client. +var DefaultClient = &http.Client{} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go new file mode 100644 index 000000000000..5e1ea912912d --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -0,0 +1,8 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +// Package version keeps the version number of the client package. +package version + +// Version is the version of this client package that is communicated to the server. +const Version = "0.7.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go new file mode 100644 index 000000000000..19118c25a2c9 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -0,0 +1,398 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package public provides a client for authentication of "public" applications. A "public" +application is defined as an app that runs on client devices (android, ios, windows, linux, ...). +These devices are "untrusted" and access resources via web APIs that must authenticate. +*/ +package public + +/* +Design note: + +public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes +during creation. As it doesn't have any pointers in it, anything borrowed from it, such as +Base.AuthParams is a copy that is free to be manipulated here. +*/ + +// TODO(msal): This should have example code for each method on client using Go's example doc framework. +// base usage details should be includee in the package documentation. + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "strconv" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" + "github.com/google/uuid" + "github.com/pkg/browser" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type Account = shared.Account + +// Options configures the Client's behavior. +type Options struct { + // Accessor controls cache persistence. By default there is no cache persistence. + // This can be set with the WithCache() option. 
+ Accessor cache.ExportReplace + + // The host of the Azure Active Directory authority. The default is https://login.microsoftonline.com/common. + // This can be changed with the WithAuthority() option. + Authority string + + // The HTTP client used for making requests. + // It defaults to a shared http.Client. + HTTPClient ops.HTTPClient +} + +func (p *Options) validate() error { + u, err := url.Parse(p.Authority) + if err != nil { + return fmt.Errorf("Authority options cannot be URL parsed: %w", err) + } + if u.Scheme != "https" { + return fmt.Errorf("Authority(%s) did not start with https://", u.String()) + } + return nil +} + +// Option is an optional argument to the New constructor. +type Option func(o *Options) + +// WithAuthority allows for a custom authority to be set. This must be a valid https url. +func WithAuthority(authority string) Option { + return func(o *Options) { + o.Authority = authority + } +} + +// WithCache allows you to set some type of cache for storing authentication tokens. +func WithCache(accessor cache.ExportReplace) Option { + return func(o *Options) { + o.Accessor = accessor + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) Option { + return func(o *Options) { + o.HTTPClient = httpClient + } +} + +// Client is a representation of authentication client for public applications as defined in the +// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications. +type Client struct { + base base.Client +} + +// New is the constructor for Client. +func New(clientID string, options ...Option) (Client, error) { + opts := Options{ + Authority: base.AuthorityPublicCloud, + HTTPClient: shared.DefaultClient, + } + + for _, o := range options { + o(&opts) + } + if err := opts.validate(); err != nil { + return Client{}, err + } + + base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithCacheAccessor(opts.Accessor)) + if err != nil { + return Client{}, err + } + return Client{base}, nil +} + +// CreateAuthCodeURL creates a URL used to acquire an authorization code. +func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) { + return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, pca.base.AuthParams) +} + +// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call. +// These are set by using various AcquireTokenSilentOption functions. +type AcquireTokenSilentOptions struct { + // Account represents the account to use. To set, use the WithSilentAccount() option. + Account Account +} + +// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent(). +type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions) + +// WithSilentAccount uses the passed account during an AcquireTokenSilent() call. +func WithSilentAccount(account Account) AcquireTokenSilentOption { + return func(a *AcquireTokenSilentOptions) { + a.Account = account + } +} + +// AcquireTokenSilent acquires a token from either the cache or using a refresh token. 
+func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) {
+	opts := AcquireTokenSilentOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+
+	silentParameters := base.AcquireTokenSilentParameters{
+		Scopes:      scopes,
+		Account:     opts.Account,
+		RequestType: accesstokens.ATPublic,
+		IsAppCache:  false,
+	}
+
+	return pca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (AuthResult, error) {
+	authParams := pca.base.AuthParams
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATUsernamePassword
+	authParams.Username = username
+	authParams.Password = password
+
+	token, err := pca.base.Token.UsernamePassword(ctx, authParams)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+type DeviceCodeResult = accesstokens.DeviceCodeResult
+
+// DeviceCode provides the results of the device code flow's first stage (containing the code)
+// that must be entered on the second device and provides a method to retrieve the AuthenticationResult
+// once that code has been entered and verified.
+type DeviceCode struct {
+	// Result holds the information about the device code (such as the code).
+	Result DeviceCodeResult
+
+	authParams authority.AuthParams
+	client     Client
+	dc         oauth.DeviceCode
+}
+
+// AuthenticationResult retrieves the AuthenticationResult once the user enters the code
+// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context
+// is cancelled or the token expires.
+func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) {
+	token, err := d.dc.Token(ctx)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+}
+
+// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token.
+// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in.
+func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (DeviceCode, error) {
+	authParams := pca.base.AuthParams
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATDeviceCode
+
+	dc, err := pca.base.Token.DeviceCode(ctx, authParams)
+	if err != nil {
+		return DeviceCode{}, err
+	}
+
+	return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil
+}
+
+// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type AcquireTokenByAuthCodeOptions struct {
+	Challenge string
+}
+
+// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode().
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions)
+
+// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) AcquireTokenByAuthCodeOption {
+	return func(a *AcquireTokenByAuthCodeOptions) {
+		a.Challenge = challenge
+	}
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) {
+	opts := AcquireTokenByAuthCodeOptions{}
+	for _, o := range options {
+		o(&opts)
+	}
+
+	params := base.AcquireTokenAuthCodeParameters{
+		Scopes:      scopes,
+		Code:        code,
+		Challenge:   opts.Challenge,
+		AppType:     accesstokens.ATPublic,
+		RedirectURI: redirectURI,
+	}
+
+	return pca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// Accounts gets all the accounts in the token cache.
+// If there are no accounts in the cache the returned slice is empty.
+func (pca Client) Accounts() []Account {
+	return pca.base.AllAccounts()
+}
+
+// RemoveAccount signs the account out and forgets account from token cache.
+func (pca Client) RemoveAccount(account Account) error {
+	pca.base.RemoveAccount(account)
+	return nil
+}
+
+// InteractiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow.
+type InteractiveAuthOptions struct {
+	// Used to specify a custom port for the local server. http://localhost:portnumber
+	// All other URI components are ignored.
+	RedirectURI string
+}
+
+// InteractiveAuthOption changes options inside InteractiveAuthOptions used in .AcquireTokenInteractive().
+type InteractiveAuthOption func(*InteractiveAuthOptions)
+
+// WithRedirectURI uses the specified redirect URI for interactive auth.
+func WithRedirectURI(redirectURI string) InteractiveAuthOption {
+	return func(o *InteractiveAuthOptions) {
+		o.RedirectURI = redirectURI
+	}
+}
+
+// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication
+func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, options ...InteractiveAuthOption) (AuthResult, error) {
+	opts := InteractiveAuthOptions{}
+	for _, opt := range options {
+		opt(&opts)
+	}
+	// the code verifier is a random 32-byte sequence that's been base-64 encoded without padding.
+	// it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636
+	cv, challenge, err := codeVerifier()
+	if err != nil {
+		return AuthResult{}, err
+	}
+	var redirectURL *url.URL
+	if opts.RedirectURI != "" {
+		redirectURL, err = url.Parse(opts.RedirectURI)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+	authParams := pca.base.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+ authParams.Scopes = scopes + authParams.AuthorizationType = authority.ATInteractive + authParams.CodeChallenge = challenge + authParams.CodeChallengeMethod = "S256" + authParams.State = uuid.New().String() + authParams.Prompt = "select_account" + res, err := pca.browserLogin(ctx, redirectURL, authParams) + if err != nil { + return AuthResult{}, err + } + authParams.Redirecturi = res.redirectURI + + req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv) + if err != nil { + return AuthResult{}, err + } + + token, err := pca.base.Token.AuthCode(ctx, req) + if err != nil { + return AuthResult{}, err + } + + return pca.base.AuthResultFromToken(ctx, authParams, token, true) +} + +type interactiveAuthResult struct { + authCode string + redirectURI string +} + +// provides a test hook to simulate opening a browser +var browserOpenURL = func(authURL string) error { + return browser.OpenURL(authURL) +} + +// parses the port number from the provided URL. +// returns 0 if nil or no port is specified. +func parsePort(u *url.URL) (int, error) { + if u == nil { + return 0, nil + } + p := u.Port() + if p == "" { + return 0, nil + } + return strconv.Atoi(p) +} + +// browserLogin launches the system browser for interactive login +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { + // start local redirect server so login can call us back + port, err := parsePort(redirectURI) + if err != nil { + return interactiveAuthResult{}, err + } + srv, err := local.New(params.State, port) + if err != nil { + return interactiveAuthResult{}, err + } + defer srv.Shutdown() + params.Scopes = accesstokens.AppendDefaultScopes(params) + authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params) + if err != nil { + return interactiveAuthResult{}, err + } + // open browser window so user can select credentials + if err := browserOpenURL(authURL); err != nil { + return interactiveAuthResult{}, err + } + // now wait until the logic calls us back + res := srv.Result(ctx) + if res.Err != nil { + return interactiveAuthResult{}, res.Err + } + return interactiveAuthResult{ + authCode: res.Code, + redirectURI: srv.Addr, + }, nil +} + +// creates a code verifier string along with its SHA256 hash which +// is used as the challenge when requesting an auth code. +// used in interactive auth flow for PKCE. 
+func codeVerifier() (codeVerifier string, challenge string, err error) { + cvBytes := make([]byte, 32) + if _, err = rand.Read(cvBytes); err != nil { + return + } + codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes) + // for PKCE, create a hash of the code verifier + cvh := sha256.Sum256([]byte(codeVerifier)) + challenge = base64.RawURLEncoding.EncodeToString(cvh[:]) + return +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE new file mode 100644 index 000000000000..d46e9d128cdb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/LICENSE @@ -0,0 +1,14 @@ +Copyright (c) 2015 aliyun.com + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go new file mode 100644 index 000000000000..bc1e4fa3f607 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go @@ -0,0 +1,190 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "fmt" + "hash" + "io" + "net/http" + "sort" + "strconv" + "strings" +) + +// headerSorter defines the key-value structure for storing the sorted data in signHeader. +type headerSorter struct { + Keys []string + Vals []string +} + +// getAdditionalHeaderKeys get exist key in http header +func (conn Conn) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) { + var keysList []string + keysMap := make(map[string]string) + srcKeys := make(map[string]string) + + for k := range req.Header { + srcKeys[strings.ToLower(k)] = "" + } + + for _, v := range conn.config.AdditionalHeaders { + if _, ok := srcKeys[strings.ToLower(v)]; ok { + keysMap[strings.ToLower(v)] = "" + } + } + + for k := range keysMap { + keysList = append(keysList, k) + } + sort.Strings(keysList) + return keysList, keysMap +} + +// signHeader signs the header and sets it as the authorization header. 
+func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { + akIf := conn.config.GetCredentials() + authorizationStr := "" + if conn.config.AuthVersion == AuthV2 { + additionalList, _ := conn.getAdditionalHeaderKeys(req) + if len(additionalList) > 0 { + authorizationFmt := "OSS2 AccessKeyId:%v,AdditionalHeaders:%v,Signature:%v" + additionnalHeadersStr := strings.Join(additionalList, ";") + authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), additionnalHeadersStr, conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())) + } else { + authorizationFmt := "OSS2 AccessKeyId:%v,Signature:%v" + authorizationStr = fmt.Sprintf(authorizationFmt, akIf.GetAccessKeyID(), conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret())) + } + } else { + // Get the final authorization string + authorizationStr = "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + } + + // Give the parameter "Authorization" value + req.Header.Set(HTTPHeaderAuthorization, authorizationStr) +} + +func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string { + // Find out the "x-oss-"'s address in header of the request + ossHeadersMap := make(map[string]string) + additionalList, additionalMap := conn.getAdditionalHeaderKeys(req) + for k, v := range req.Header { + if strings.HasPrefix(strings.ToLower(k), "x-oss-") { + ossHeadersMap[strings.ToLower(k)] = v[0] + } else if conn.config.AuthVersion == AuthV2 { + if _, ok := additionalMap[strings.ToLower(k)]; ok { + ossHeadersMap[strings.ToLower(k)] = v[0] + } + } + } + hs := newHeaderSorter(ossHeadersMap) + + // Sort the ossHeadersMap by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, date is expires + date := req.Header.Get(HTTPHeaderDate) + contentType := req.Header.Get(HTTPHeaderContentType) + contentMd5 := req.Header.Get(HTTPHeaderContentMD5) + + // default is v1 signature + signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + + // v2 signature + if conn.config.AuthVersion == AuthV2 { + signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource + h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret)) + } + + // convert sign to log for easy to view + if conn.config.LogLevel >= Debug { + var signBuf bytes.Buffer + for i := 0; i < len(signStr); i++ { + if signStr[i] != '\n' { + signBuf.WriteByte(signStr[i]) + } else { + signBuf.WriteString("\\n") + } + } + conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, signBuf.String()) + } + + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + return signedStr +} + +func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string { + if params[HTTPParamAccessKeyID] == nil { + return "" + } + + canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName) + canonParamsKeys := []string{} + for key := range params { + if key != HTTPParamAccessKeyID 
&& key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken { + canonParamsKeys = append(canonParamsKeys, key) + } + } + + sort.Strings(canonParamsKeys) + canonParamsStr := "" + for _, key := range canonParamsKeys { + canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string)) + } + + expireStr := strconv.FormatInt(expiration, 10) + signStr := expireStr + "\n" + canonParamsStr + canonResource + + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return signedStr +} + +// newHeaderSorter is an additional function for function SignHeader. +func newHeaderSorter(m map[string]string) *headerSorter { + hs := &headerSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *headerSorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *headerSorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. +func (hs *headerSorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. +func (hs *headerSorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go new file mode 100644 index 000000000000..430252c02f9f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go @@ -0,0 +1,1289 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/xml" + "fmt" + "hash" + "hash/crc64" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// Bucket implements the operations of object. +type Bucket struct { + Client Client + BucketName string +} + +// PutObject creates a new object and it will overwrite the original one if it exists already. +// +// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\". +// reader io.Reader instance for reading the data for uploading +// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding +// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details. +// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { + opts := AddContentType(options, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// PutObjectFromFile creates a new object from the local file. +// +// objectKey object key. +// filePath the local file path to upload. +// options the options for uploading the object. Refer to the parameter options in PutObject for more details. +// +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + opts := AddContentType(options, filePath, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: fd, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// DoPutObject does the actual upload work. +// +// request the request instance for uploading an object. +// options the options for uploading an object. +// +// Response the response from OSS. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { + isOptSet, _, _ := IsOptionSet(options, HTTPHeaderContentType) + if !isOptSet { + options = AddContentType(options, request.ObjectKey) + } + + listener := GetProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener) + if err != nil { + return nil, err + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoPutObject") + if err != nil { + return resp, err + } + } + + err = CheckRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// GetObject downloads the object. +// +// objectKey the object key. +// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch, +// IfNoneMatch, AcceptEncoding. For more details, please check out: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return nil, err + } + + return result.Response, nil +} + +// GetObjectToFile downloads the data to a local file. +// +// objectKey the object key to download. +// filePath the local file to store the object data. +// options the options for downloading the object. Refer to the parameter options in method GetObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Calls the API to actually download the object. Returns the result instance. + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. 
+ _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compares the CRC value + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = CheckCRC(result.Response, "GetObjectToFile") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs. +// +// request the request to download the object. +// options the options for downloading the file. Checks out the parameter options in method GetObject. +// +// GetObjectResult the result instance of getting the object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { + params, _ := GetRawParams(options) + resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + if bucket.GetConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(CrcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := GetProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// CopyObject copies the object inside the bucket. +// +// srcObjectKey the source object to copy. +// destObjectKey the target object to copy. +// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch, +// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective. +// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details : +// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = DeleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// CopyObjectTo copies the object to another bucket. +// +// srcObjectKey source object key. The source bucket is Bucket.BucketName . 
+// destBucketName target bucket name. +// destObjectKey target object name. +// options copy options, check out parameter options in function CopyObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) { + return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +// +// CopyObjectFrom copies the object to another bucket. +// +// srcBucketName source bucket name. +// srcObjectKey source object name. +// destObjectKey target object name. The target bucket name is Bucket.BucketName. +// options copy options. Check out parameter options in function CopyObject. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + destBucketName := bucket.BucketName + var out CopyObjectResult + srcBucket, err := bucket.Client.Bucket(srcBucketName) + if err != nil { + return out, err + } + + return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = DeleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return out, err + } + params := map[string]interface{}{} + resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AppendObject uploads the data in the way of appending an existing or new object. +// +// AppendObject the parameter appendPosition specifies which postion (in the target object) to append. For the first append (to a non-existing file), +// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length. +// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes data, then the second call's position is 65536. +// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller need not to maintain this information). +// +// objectKey the target object to append to. +// reader io.Reader. The read instance for reading the data to append. +// appendPosition the start position to append. +// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding, +// Expires, ServerSideEncryption, ObjectACL. +// +// int64 the next append position, it's valid when error is nil. +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) { + request := &AppendObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + Position: appendPosition, + } + + result, err := bucket.DoAppendObject(request, options) + if err != nil { + return appendPosition, err + } + + return result.NextPosition, err +} + +// DoAppendObject is the actual API that does the object append. +// +// request the request object for appending object. +// options the options for appending object. +// +// AppendObjectResult the result object for appending object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) { + params := map[string]interface{}{} + params["append"] = nil + params["position"] = strconv.FormatInt(request.Position, 10) + headers := make(map[string]string) + + opts := AddContentType(options, request.ObjectKey) + handleOptions(headers, opts) + + var initCRC uint64 + isCRCSet, initCRCOpt, _ := IsOptionSet(options, initCRC64) + if isCRCSet { + initCRC = initCRCOpt.(uint64) + } + + listener := GetProgressListener(options) + + handleOptions(headers, opts) + resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers, + request.Reader, initCRC, listener) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return nil, err + } + defer resp.Body.Close() + + nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64) + result := &AppendObjectResult{ + NextPosition: nextPosition, + CRC: resp.ServerCRC, + } + + if bucket.GetConfig().IsEnableCRC && isCRCSet { + err = CheckCRC(resp, "AppendObject") + if err != nil { + return result, err + } + } + + return result, nil +} + +// DeleteObject deletes the object. +// +// objectKey the object key to delete. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObject(objectKey string, options ...Option) error { + params, _ := GetRawParams(options) + resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// DeleteObjects deletes multiple objects. +// +// objectKeys the object keys to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectsResult the result object. +// error it's nil if no error, otherwise it's an error object. 
+// +func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) { + out := DeleteObjectsResult{} + dxml := deleteXML{} + for _, key := range objectKeys { + dxml.Objects = append(dxml.Objects, DeleteObject{Key: key}) + } + + isQuiet, _ := FindOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + deletedResult := DeleteObjectVersionsResult{} + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &deletedResult); err == nil { + err = decodeDeleteObjectsResult(&deletedResult) + } + } + + // Keep compatibility:need convert to struct DeleteObjectsResult + out.XMLName = deletedResult.XMLName + for _, v := range deletedResult.DeletedObjectsDetail { + out.DeletedObjects = append(out.DeletedObjects, v.Key) + } + + return out, err +} + +// DeleteObjectVersions deletes multiple object versions. +// +// objectVersions the object keys and versions to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectVersionsResult the result object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObjectVersions(objectVersions []DeleteObject, options ...Option) (DeleteObjectVersionsResult, error) { + out := DeleteObjectVersionsResult{} + dxml := deleteXML{} + dxml.Objects = objectVersions + + isQuiet, _ := FindOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &out); err == nil { + err = decodeDeleteObjectsResult(&out) + } + } + return out, err +} + +// IsObjectExist checks if the object exists. +// +// bool flag of object's existence (true:exists; false:non-exist) when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) IsObjectExist(objectKey string, options ...Option) (bool, error) { + _, err := bucket.GetObjectMeta(objectKey, options...) + if err == nil { + return true, nil + } + + switch err.(type) { + case ServiceError: + if err.(ServiceError).StatusCode == 404 { + return false, nil + } + } + + return false, err +} + +// ListObjects lists the objects under the current bucket. +// +// options it contains all the filters for listing objects. 
+// It can specify a prefix filter on object keys, the maximum number of keys to return, the object key marker, and the delimiter for grouping object names.
+// The key marker means the returned objects' key must be greater than it in lexicographic order.
+//
+// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21,
+// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns
+// the three objects my-object-2, my-object-21, my-object-22. If the marker is my-object-22 (no other filters), then it returns
+// the three objects my-object-3, my-object-31, my-object-32. If the max keys is 5, then it returns 5 objects.
+// The three filters could be used together to achieve filter and paging functionality.
+// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders).
+// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties.
+// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects.
+// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix.
+//
+// For common usage scenarios, check out sample/list_object.go.
+//
+// ListObjectsResult the return value after operation succeeds (only valid when error is nil).
+//
+func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) {
+	var out ListObjectsResult
+
+	options = append(options, EncodingType("url"))
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResult(&out)
+	return out, err
+}
+
+// It is recommended to use ListObjectsV2 instead of ListObjects.
+// ListObjectsV2 lists the objects under the current bucket.
+// ListObjectsResultV2 the return value after operation succeeds (only valid when error is nil).
+func (bucket Bucket) ListObjectsV2(options ...Option) (ListObjectsResultV2, error) {
+	var out ListObjectsResultV2
+
+	options = append(options, EncodingType("url"))
+	options = append(options, ListType(2))
+	params, err := GetRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	resp, err := bucket.do("GET", "", params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return out, err
+	}
+
+	err = decodeListObjectsResultV2(&out)
+	return out, err
+}
+
+// ListObjectVersions lists objects of all versions under the current bucket.
+func (bucket Bucket) ListObjectVersions(options ...Option) (ListObjectVersionsResult, error) { + var out ListObjectVersionsResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + params["versions"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectVersionsResult(&out) + return out, err +} + +// SetObjectMeta sets the metadata of the Object. +// +// objectKey object +// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, and custom metadata. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error { + options = append(options, MetadataDirective(MetaReplace)) + _, err := bucket.CopyObject(objectKey, objectKey, options...) + return err +} + +// GetObjectDetailedMeta gets the object's detailed metadata +// +// objectKey object key. +// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince, +// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html +// +// http.Header object meta when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := GetRawParams(options) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// GetObjectMeta gets object metadata. +// +// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag +// size, LastModified. The size information is in the HTTP header Content-Length. +// +// objectKey object key +// +// http.Header the object's metadata, valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := GetRawParams(options) + params["objectMeta"] = nil + //resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// SetObjectACL updates the object's ACL. +// +// Only the bucket's owner could update object's ACL which priority is higher than bucket's ACL. +// For example, if the bucket ACL is private and object's ACL is public-read-write. +// Then object's ACL is used and it means all users could read or write that object. +// When the object's ACL is not set, then bucket's ACL is used as the object's ACL. +// +// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object; +// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects, +// CompleteMultipartUpload and CopyObject on target object. 
+// +// objectKey the target object key (to set the ACL on) +// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType, options ...Option) error { + options = append(options, ObjectACL(objectACL)) + params, _ := GetRawParams(options) + params["acl"] = nil + resp, err := bucket.do("PUT", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetObjectACL gets object's ACL +// +// objectKey the object to get ACL from. +// +// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectACL(objectKey string, options ...Option) (GetObjectACLResult, error) { + var out GetObjectACLResult + params, _ := GetRawParams(options) + params["acl"] = nil + resp, err := bucket.do("GET", objectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// PutSymlink creates a symlink (to point to an existing object) +// +// Symlink cannot point to another symlink. +// When creating a symlink, it does not check the existence of the target file, and does not check if the target file is symlink. +// Neither it checks the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink. +// If trying to add an existing file, as long as the caller has the write permission, the existing one will be overwritten. +// If the x-oss-meta- is specified, it will be added as the metadata of the symlink file. +// +// symObjectKey the symlink object's key. +// targetObjectKey the target object key to point to. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error { + options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey))) + params, _ := GetRawParams(options) + params["symlink"] = nil + resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetSymlink gets the symlink object with the specified key. +// If the symlink object does not exist, returns 404. +// +// objectKey the symlink object's key. +// +// error it's nil if no error, otherwise it's an error object. +// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object. +// +func (bucket Bucket) GetSymlink(objectKey string, options ...Option) (http.Header, error) { + params, _ := GetRawParams(options) + params["symlink"] = nil + resp, err := bucket.do("GET", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget) + targetObjectKey, err = url.QueryUnescape(targetObjectKey) + if err != nil { + return resp.Headers, err + } + resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey) + return resp.Headers, err +} + +// RestoreObject restores the object from the archive storage. +// +// An archive object is in cold status by default and it cannot be accessed. 
+// When restore is called on the cold object, it will become available for access after some time. +// If multiple restores are called on the same file when the object is being restored, server side does nothing for additional calls but returns success. +// By default, the restored object is available for access for one day. After that it will be unavailable again. +// But if another RestoreObject are called after the file is restored, then it will extend one day's access time of that object, up to 7 days. +// +// objectKey object key to restore. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) RestoreObject(objectKey string, options ...Option) error { + params, _ := GetRawParams(options) + params["restore"] = nil + resp, err := bucket.do("POST", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted}) +} + +// RestoreObjectDetail support more features than RestoreObject +func (bucket Bucket) RestoreObjectDetail(objectKey string, restoreConfig RestoreConfiguration, options ...Option) error { + if restoreConfig.Tier == "" { + // Expedited, Standard, Bulk + restoreConfig.Tier = string(RestoreStandard) + } + + if restoreConfig.Days == 0 { + restoreConfig.Days = 1 + } + + bs, err := xml.Marshal(restoreConfig) + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + + params, _ := GetRawParams(options) + params["restore"] = nil + + resp, err := bucket.do("POST", objectKey, params, options, buffer, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted}) +} + +// RestoreObjectXML support more features than RestoreObject +func (bucket Bucket) RestoreObjectXML(objectKey, configXML string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(configXML)) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + + params, _ := GetRawParams(options) + params["restore"] = nil + + resp, err := bucket.do("POST", objectKey, params, options, buffer, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted}) +} + +// SignURL signs the URL. Users could access the object directly with this URL without getting the AK. +// +// objectKey the target object to sign. +// signURLConfig the configuration for the signed URL +// +// string returns the signed URL, when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) { + if expiredInSec < 0 { + return "", fmt.Errorf("invalid expires: %d, expires must bigger than 0", expiredInSec) + } + expiration := time.Now().Unix() + expiredInSec + + params, err := GetRawParams(options) + if err != nil { + return "", err + } + + headers := make(map[string]string) + err = handleOptions(headers, options) + if err != nil { + return "", err + } + + return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil +} + +// PutObjectWithURL uploads an object with the URL. If the object exists, it will be overwritten. 
+// PutObjectWithURL It will not generate minetype according to the key name. +// +// signedURL signed URL. +// reader io.Reader the read instance for reading the data for the upload. +// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding, +// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details: +// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error { + resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// PutObjectFromFileWithURL uploads an object from a local file with the signed URL. +// PutObjectFromFileWithURL It does not generate mimetype according to object key's name or the local file name. +// +// signedURL the signed URL. +// filePath local file path, such as dirfile.txt, for uploading. +// options options for uploading, same as the options in PutObject function. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// DoPutObjectWithURL is the actual API that does the upload with URL work(internal for SDK) +// +// signedURL the signed URL. +// reader io.Reader the read instance for getting the data to upload. +// options options for uploading. +// +// Response the response object which contains the HTTP response. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) { + listener := GetProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener) + if err != nil { + return nil, err + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoPutObjectWithURL") + if err != nil { + return resp, err + } + } + + err = CheckRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL. +// +// signedURL the signed URL. +// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch, +// IfNoneMatch, AcceptEncoding. For more information, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser the reader object for getting the data from response. It needs be closed after the usage. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return nil, err + } + return result.Response, nil +} + +// GetObjectToFileWithURL downloads the object into a local file with the signed URL. +// +// signedURL the signed URL +// filePath the local file path to download to. 
+// options the options for downloading object. Check out the parameter options in function GetObject for the reference. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Get the object's content + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the file does not exist, create one. If exists, then overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Save the data to the file. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compare the CRC value. If CRC values do not match, return error. + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := FindOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + + if bucket.GetConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = CheckCRC(result.Response, "GetObjectToFileWithURL") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObjectWithURL is the actual API that downloads the file with the signed URL. +// +// signedURL the signed URL. +// options the options for getting object. Check out parameter options in GetObject for the reference. +// +// GetObjectResult the result object when the error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) { + params, _ := GetRawParams(options) + resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := IsOptionSet(options, HTTPHeaderRange) + if bucket.GetConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(CrcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := GetProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// +// ProcessObject apply process on the specified image file. +// +// The supported process includes resize, rotate, crop, watermark, format, +// udf, customized style, etc. +// +// +// objectKey object key to process. +// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA" +// +// error it's nil if no error, otherwise it's an error object. 
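+//
+// A minimal sketch using the process string format shown above; the object key is
+// an assumption:
+//
+//    result, err := bucket.ProcessObject("example.jpg", "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA")
+//    // result holds the decoded ProcessObjectResult when err is nil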
+// +func (bucket Bucket) ProcessObject(objectKey string, process string, options ...Option) (ProcessObjectResult, error) { + var out ProcessObjectResult + params, _ := GetRawParams(options) + params["x-oss-process"] = nil + processData := fmt.Sprintf("%v=%v", "x-oss-process", process) + data := strings.NewReader(processData) + resp, err := bucket.do("POST", objectKey, params, nil, data, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = jsonUnmarshal(resp.Body, &out) + return out, err +} + +// +// PutObjectTagging add tagging to object +// +// objectKey object key to add tagging +// tagging tagging to be added +// +// error nil if success, otherwise error +// +func (bucket Bucket) PutObjectTagging(objectKey string, tagging Tagging, options ...Option) error { + bs, err := xml.Marshal(tagging) + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params, _ := GetRawParams(options) + params["tagging"] = nil + resp, err := bucket.do("PUT", objectKey, params, options, buffer, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// +// GetObjectTagging get tagging of the object +// +// objectKey object key to get tagging +// +// Tagging +// error nil if success, otherwise error + +func (bucket Bucket) GetObjectTagging(objectKey string, options ...Option) (GetObjectTaggingResult, error) { + var out GetObjectTaggingResult + params, _ := GetRawParams(options) + params["tagging"] = nil + + resp, err := bucket.do("GET", objectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// DeleteObjectTagging delete object taggging +// +// objectKey object key to delete tagging +// +// error nil if success, otherwise error +// +func (bucket Bucket) DeleteObjectTagging(objectKey string, options ...Option) error { + params, _ := GetRawParams(options) + params["tagging"] = nil + + if objectKey == "" { + return fmt.Errorf("invalid argument: object name is empty") + } + + resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +func (bucket Bucket) OptionsMethod(objectKey string, options ...Option) (http.Header, error) { + var out http.Header + resp, err := bucket.do("OPTIONS", objectKey, nil, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + out = resp.Headers + return out, nil +} + +// public +func (bucket Bucket) Do(method, objectName string, params map[string]interface{}, options []Option, + data io.Reader, listener ProgressListener) (*Response, error) { + return bucket.do(method, objectName, params, options, data, listener) +} + +// Private +func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option, + data io.Reader, listener ProgressListener) (*Response, error) { + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return nil, err + } + + err = CheckBucketName(bucket.BucketName) + if len(bucket.BucketName) > 0 && err != nil { + return nil, err + } + + resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName, + params, headers, data, 0, listener) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil && resp != nil { + pRespHeader := respHeader.(*http.Header) 
+ *pRespHeader = resp.Headers + } + + return resp, err +} + +func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option, + data io.Reader, listener ProgressListener) (*Response, error) { + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return nil, err + } + + resp, err := bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + return resp, err +} + +func (bucket Bucket) GetConfig() *Config { + return bucket.Client.Config +} + +func AddContentType(options []Option, keys ...string) []Option { + typ := TypeByExtension("") + for _, key := range keys { + typ = TypeByExtension(key) + if typ != "" { + break + } + } + + if typ == "" { + typ = "application/octet-stream" + } + + opts := []Option{ContentType(typ)} + opts = append(opts, options...) + + return opts +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go new file mode 100644 index 000000000000..d725a40ebc48 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go @@ -0,0 +1,2056 @@ +// Package oss implements functions for access oss service. +// It has two main struct Client and Bucket. +package oss + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "strings" + "time" +) + +// Client SDK's entry point. It's for bucket related options such as create/delete/set bucket (such as set/get ACL/lifecycle/referer/logging/website). +// Object related operations are done by Bucket class. +// Users use oss.New to create Client instance. +// +type ( + // Client OSS client + Client struct { + Config *Config // OSS client configuration + Conn *Conn // Send HTTP request + HTTPClient *http.Client //http.Client to use - if nil will make its own + } + + // ClientOption client option such as UseCname, Timeout, SecurityToken. + ClientOption func(*Client) +) + +// New creates a new client. +// +// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com . +// accessKeyId access key Id. +// accessKeySecret access key secret. +// +// Client creates the new client instance, the returned value is valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) { + // Configuration + config := getDefaultOssConfig() + config.Endpoint = endpoint + config.AccessKeyID = accessKeyID + config.AccessKeySecret = accessKeySecret + + // URL parse + url := &urlMaker{} + err := url.Init(config.Endpoint, config.IsCname, config.IsUseProxy) + if err != nil { + return nil, err + } + + // HTTP connect + conn := &Conn{config: config, url: url} + + // OSS client + client := &Client{ + Config: config, + Conn: conn, + } + + // Client options parse + for _, option := range options { + option(client) + } + + if config.AuthVersion != AuthV1 && config.AuthVersion != AuthV2 { + return nil, fmt.Errorf("Init client Error, invalid Auth version: %v", config.AuthVersion) + } + + // Create HTTP connection + err = conn.init(config, url, client.HTTPClient) + + return client, err +} + +// Bucket gets the bucket instance. +// +// bucketName the bucket name. 
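+//
+// A minimal sketch of creating a client and obtaining a bucket handle; the endpoint
+// value and the placeholder credentials are assumptions:
+//
+//    client, err := oss.New("http://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>")
+//    if err != nil {
+//        // handle error
+//    }
+//    bucket, err := client.Bucket("example-bucket")
+//    // bucket is then used for the object-level calls defined on Bucket
+//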
+// Bucket the bucket object, when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) Bucket(bucketName string) (*Bucket, error) { + err := CheckBucketName(bucketName) + if err != nil { + return nil, err + } + + return &Bucket{ + client, + bucketName, + }, nil +} + +// CreateBucket creates a bucket. +// +// bucketName the bucket name, it's globably unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-'). +// It must start with lowercase letter or number and the length can only be between 3 and 255. +// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate. +// It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) CreateBucket(bucketName string, options ...Option) error { + headers := make(map[string]string) + handleOptions(headers, options) + + buffer := new(bytes.Buffer) + + var cbConfig createBucketConfiguration + cbConfig.StorageClass = StorageStandard + + isStorageSet, valStroage, _ := IsOptionSet(options, storageClass) + isRedundancySet, valRedundancy, _ := IsOptionSet(options, redundancyType) + isObjectHashFuncSet, valHashFunc, _ := IsOptionSet(options, objectHashFunc) + if isStorageSet { + cbConfig.StorageClass = valStroage.(StorageClassType) + } + + if isRedundancySet { + cbConfig.DataRedundancyType = valRedundancy.(DataRedundancyType) + } + + if isObjectHashFuncSet { + cbConfig.ObjectHashFunction = valHashFunc.(ObjecthashFuncType) + } + + bs, err := xml.Marshal(cbConfig) + if err != nil { + return err + } + buffer.Write(bs) + contentType := http.DetectContentType(buffer.Bytes()) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// create bucket xml +func (client Client) CreateBucketXml(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// ListBuckets lists buckets of the current account under the given endpoint, with optional filters. +// +// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter. +// And marker makes sure the returned buckets' name are greater than it in lexicographic order. +// Maxkeys limits the max keys to return, and by default it's 100 and up to 1000. +// For the common usage scenario, please check out list_bucket.go in the sample. +// ListBucketsResponse the response object if error is nil. +// +// error it's nil if no error, otherwise it's an error object. 
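+//
+// A minimal sketch of listing buckets filtered by a name prefix; the prefix value and
+// the page size are assumptions:
+//
+//    res, err := client.ListBuckets(oss.Prefix("example-"), oss.MaxKeys(10))
+//    if err == nil {
+//        for _, b := range res.Buckets {
+//            fmt.Println(b.Name)
+//        }
+//    }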
+// +func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) { + var out ListBucketsResult + + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + resp, err := client.do("GET", "", params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// IsBucketExist checks if the bucket exists +// +// bucketName the bucket name. +// +// bool true if it exists, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) IsBucketExist(bucketName string) (bool, error) { + listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1)) + if err != nil { + return false, err + } + + if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName { + return true, nil + } + return false, nil +} + +// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts). +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucket(bucketName string, options ...Option) error { + params := map[string]interface{}{} + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLocation gets the bucket location. +// +// Checks out the following link for more information : +// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html +// +// bucketName the bucket name +// +// string bucket's datacenter location +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLocation(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["location"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var LocationConstraint string + err = xmlUnmarshal(resp.Body, &LocationConstraint) + return LocationConstraint, err +} + +// SetBucketACL sets bucket's ACL. +// +// bucketName the bucket name +// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketACL(bucketName string, bucketACL ACLType, options ...Option) error { + headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)} + params := map[string]interface{}{} + params["acl"] = nil + resp, err := client.do("PUT", bucketName, params, headers, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketACL gets the bucket ACL. +// +// bucketName the bucket name. +// +// GetBucketAclResponse the result object, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketACL(bucketName string, options ...Option) (GetBucketACLResult, error) { + var out GetBucketACLResult + params := map[string]interface{}{} + params["acl"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLifecycle sets the bucket's lifecycle. 
+// +// For more information, checks out following link: +// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html +// +// bucketName the bucket name. +// rules the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively. +// Check out sample/bucket_lifecycle.go for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule, options ...Option) error { + err := verifyLifecycleRules(rules) + if err != nil { + return err + } + lifecycleCfg := LifecycleConfiguration{Rules: rules} + bs, err := xml.Marshal(lifecycleCfg) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// SetBucketLifecycleXml sets the bucket's lifecycle rule from xml config +func (client Client) SetBucketLifecycleXml(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLifecycle deletes the bucket's lifecycle. +// +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketLifecycle(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLifecycle gets the bucket's lifecycle settings. +// +// bucketName the bucket name. +// +// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLifecycle(bucketName string, options ...Option) (GetBucketLifecycleResult, error) { + var out GetBucketLifecycleResult + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) 
+ if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + + // NonVersionTransition is not suggested to use + // to keep compatible + for k, rule := range out.Rules { + if len(rule.NonVersionTransitions) > 0 { + out.Rules[k].NonVersionTransition = &(out.Rules[k].NonVersionTransitions[0]) + } + } + return out, err +} + +func (client Client) GetBucketLifecycleXml(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + out := string(body) + return out, err +} + +// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer. +// +// To avoid stealing link on OSS data, OSS supports the HTTP referrer header. A whitelist referrer could be set either by API or web console, as well as +// the allowing empty referrer flag. Note that this applies to requests from webbrowser only. +// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket. +// For more information, please check out this link : +// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html +// +// bucketName the bucket name. +// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards. +// The sample could be found in sample/bucket_referer.go +// allowEmptyReferer the flag of allowing empty referrer. By default it's true. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool, options ...Option) error { + rxml := RefererXML{} + rxml.AllowEmptyReferer = allowEmptyReferer + if referers == nil { + rxml.RefererList = append(rxml.RefererList, "") + } else { + for _, referer := range referers { + rxml.RefererList = append(rxml.RefererList, referer) + } + } + + bs, err := xml.Marshal(rxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReferer gets the bucket's referrer white list. +// +// bucketName the bucket name. +// +// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReferer(bucketName string, options ...Option) (GetBucketRefererResult, error) { + var out GetBucketRefererResult + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLogging sets the bucket logging settings. +// +// OSS could automatically store the access log. Only the bucket owner could enable the logging. 
+// Once enabled, OSS would save all the access log into hourly log files in a specified bucket. +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html +// +// bucketName bucket name to enable the log. +// targetBucket the target bucket name to store the log files. +// targetPrefix the log files' prefix. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string, + isEnable bool, options ...Option) error { + var err error + var bs []byte + if isEnable { + lxml := LoggingXML{} + lxml.LoggingEnabled.TargetBucket = targetBucket + lxml.LoggingEnabled.TargetPrefix = targetPrefix + bs, err = xml.Marshal(lxml) + } else { + lxml := loggingXMLEmpty{} + bs, err = xml.Marshal(lxml) + } + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket. +// +// bucketName the bucket name to disable the logging. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketLogging(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLogging gets the bucket's logging settings +// +// bucketName the bucket name +// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLogging(bucketName string, options ...Option) (GetBucketLoggingResult, error) { + var out GetBucketLoggingResult + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketWebsite sets the bucket's static website's index and error page. +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// indexDocument index page. +// errorDocument error page. +// +// error it's nil if no error, otherwise it's an error object. 
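+//
+// A minimal sketch; the bucket name and page names are assumptions:
+//
+//    err := client.SetBucketWebsite("example-bucket", "index.html", "error.html")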
+// +func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string, options ...Option) error { + wxml := WebsiteXML{} + wxml.IndexDocument.Suffix = indexDocument + wxml.ErrorDocument.Key = errorDocument + + bs, err := xml.Marshal(wxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// SetBucketWebsiteDetail sets the bucket's static website's detail +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// +// wxml the website's detail +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML, options ...Option) error { + bs, err := xml.Marshal(wxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// SetBucketWebsiteXml sets the bucket's static website's rule +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// +// wxml the website's detail +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketWebsiteXml(bucketName string, webXml string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(webXml)) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketWebsite deletes the bucket's static web site settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketWebsite(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) 
+ if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketWebsite gets the bucket's default page (index page) and the error page. +// +// bucketName the bucket name +// +// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketWebsite(bucketName string, options ...Option) (GetBucketWebsiteResult, error) { + var out GetBucketWebsiteResult + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetBucketWebsiteXml gets the bucket's website config xml config. +// +// bucketName the bucket name +// +// string the bucket's xml config, It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketWebsiteXml(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + + out := string(body) + return out, err +} + +// SetBucketCORS sets the bucket's CORS rules +// +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html +// +// bucketName the bucket name +// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule, options ...Option) error { + corsxml := CORSXML{} + for _, v := range corsRules { + cr := CORSRule{} + cr.AllowedMethod = v.AllowedMethod + cr.AllowedOrigin = v.AllowedOrigin + cr.AllowedHeader = v.AllowedHeader + cr.ExposeHeader = v.ExposeHeader + cr.MaxAgeSeconds = v.MaxAgeSeconds + corsxml.CORSRules = append(corsxml.CORSRules, cr) + } + + bs, err := xml.Marshal(corsxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +func (client Client) SetBucketCORSXml(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketCORS deletes the bucket's static website settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. 
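+//
+// A minimal sketch of applying a rule with SetBucketCORS (defined above); the slice
+// and int field types of CORSRule are assumptions:
+//
+//    rule := oss.CORSRule{
+//        AllowedOrigin: []string{"*"},
+//        AllowedMethod: []string{"GET", "HEAD"},
+//        MaxAgeSeconds: 3600,
+//    }
+//    err := client.SetBucketCORS("example-bucket", []oss.CORSRule{rule})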
+// +func (client Client) DeleteBucketCORS(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketCORS gets the bucket's CORS settings. +// +// bucketName the bucket name. +// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketCORS(bucketName string, options ...Option) (GetBucketCORSResult, error) { + var out GetBucketCORSResult + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +func (client Client) GetBucketCORSXml(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + out := string(body) + return out, err +} + +// GetBucketInfo gets the bucket information. +// +// bucketName the bucket name. +// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketInfo(bucketName string, options ...Option) (GetBucketInfoResult, error) { + var out GetBucketInfoResult + params := map[string]interface{}{} + params["bucketInfo"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + + // convert None to "" + if err == nil { + if out.BucketInfo.SseRule.KMSMasterKeyID == "None" { + out.BucketInfo.SseRule.KMSMasterKeyID = "" + } + + if out.BucketInfo.SseRule.SSEAlgorithm == "None" { + out.BucketInfo.SseRule.SSEAlgorithm = "" + } + + if out.BucketInfo.SseRule.KMSDataEncryption == "None" { + out.BucketInfo.SseRule.KMSDataEncryption = "" + } + } + return out, err +} + +// SetBucketVersioning set bucket versioning:Enabled、Suspended +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) SetBucketVersioning(bucketName string, versioningConfig VersioningConfig, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(versioningConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["versioning"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketVersioning get bucket versioning status:Enabled、Suspended +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. 
+func (client Client) GetBucketVersioning(bucketName string, options ...Option) (GetBucketVersioningResult, error) { + var out GetBucketVersioningResult + params := map[string]interface{}{} + params["versioning"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketEncryption set bucket encryption config +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) SetBucketEncryption(bucketName string, encryptionRule ServerEncryptionRule, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(encryptionRule) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketEncryption get bucket encryption +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketEncryption(bucketName string, options ...Option) (GetBucketEncryptionResult, error) { + var out GetBucketEncryptionResult + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketEncryption delete bucket encryption config +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error bucket +func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// SetBucketTagging add tagging to bucket +// bucketName name of bucket +// tagging tagging to be added +// error nil if success, otherwise error +func (client Client) SetBucketTagging(bucketName string, tagging Tagging, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(tagging) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) 
+ if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketTagging get tagging of the bucket +// bucketName name of bucket +// error nil if success, otherwise error +func (client Client) GetBucketTagging(bucketName string, options ...Option) (GetBucketTaggingResult, error) { + var out GetBucketTaggingResult + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// DeleteBucketTagging delete bucket tagging +// bucketName name of bucket +// error nil if success, otherwise error +// +func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketStat get bucket stat +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketStat(bucketName string, options ...Option) (GetBucketStatResult, error) { + var out GetBucketStatResult + params := map[string]interface{}{} + params["stat"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetBucketPolicy API operation for Object Storage Service. +// +// Get the policy from the bucket. +// +// bucketName the bucket name. +// +// string return the bucket's policy, and it's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketPolicy(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["policy"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + + out := string(body) + return out, err +} + +// SetBucketPolicy API operation for Object Storage Service. +// +// Set the policy from the bucket. +// +// bucketName the bucket name. +// +// policy the bucket policy. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketPolicy(bucketName string, policy string, options ...Option) error { + params := map[string]interface{}{} + params["policy"] = nil + + buffer := strings.NewReader(policy) + + resp, err := client.do("PUT", bucketName, params, nil, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketPolicy API operation for Object Storage Service. +// +// Deletes the policy from the bucket. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["policy"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) 
+ if err != nil { + return err + } + + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// SetBucketRequestPayment API operation for Object Storage Service. +// +// Set the requestPayment of bucket +// +// bucketName the bucket name. +// +// paymentConfig the payment configuration +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error { + params := map[string]interface{}{} + params["requestPayment"] = nil + + var bs []byte + bs, err := xml.Marshal(paymentConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketRequestPayment API operation for Object Storage Service. +// +// Get bucket requestPayment +// +// bucketName the bucket name. +// +// RequestPaymentConfiguration the payment configuration +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) { + var out RequestPaymentConfiguration + params := map[string]interface{}{} + params["requestPayment"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetUserQoSInfo API operation for Object Storage Service. +// +// Get user qos. +// +// UserQoSConfiguration the User Qos and range Information. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) { + var out UserQoSConfiguration + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("GET", "", params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketQoSInfo API operation for Object Storage Service. +// +// Set Bucket Qos information. +// +// bucketName the bucket name. +// +// qosConf the qos configuration. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error { + params := map[string]interface{}{} + params["qosInfo"] = nil + + var bs []byte + bs, err := xml.Marshal(qosConf) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentTpye := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentTpye + + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketQosInfo API operation for Object Storage Service. +// +// Get Bucket Qos information. +// +// bucketName the bucket name. +// +// BucketQoSConfiguration the return qos configuration. 
+// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) { + var out BucketQoSConfiguration + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketQosInfo API operation for Object Storage Service. +// +// Delete Bucket QoS information. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// SetBucketInventory API operation for Object Storage Service +// +// Set the Bucket inventory. +// +// bucketName tht bucket name. +// +// inventoryConfig the inventory configuration. +// +// error it's nil if no error, otherwise it's an error. +// +func (client Client) SetBucketInventory(bucketName string, inventoryConfig InventoryConfiguration, options ...Option) error { + params := map[string]interface{}{} + params["inventoryId"] = inventoryConfig.Id + params["inventory"] = nil + + var bs []byte + bs, err := xml.Marshal(inventoryConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketInventory API operation for Object Storage Service +// +// Get the Bucket inventory. +// +// bucketName tht bucket name. +// +// strInventoryId the inventory id. +// +// InventoryConfiguration the inventory configuration. +// +// error it's nil if no error, otherwise it's an error. +// +func (client Client) GetBucketInventory(bucketName string, strInventoryId string, options ...Option) (InventoryConfiguration, error) { + var out InventoryConfiguration + params := map[string]interface{}{} + params["inventory"] = nil + params["inventoryId"] = strInventoryId + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// ListBucketInventory API operation for Object Storage Service +// +// List the Bucket inventory. +// +// bucketName tht bucket name. +// +// continuationToken the users token. +// +// ListInventoryConfigurationsResult list all inventory configuration by . +// +// error it's nil if no error, otherwise it's an error. 
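+//
+// A minimal sketch: an empty continuationToken lists from the beginning; pass the
+// token returned in the result to fetch the next page (bucket name is an assumption):
+//
+//    res, err := client.ListBucketInventory("example-bucket", "")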
+//
+func (client Client) ListBucketInventory(bucketName, continuationToken string, options ...Option) (ListInventoryConfigurationsResult, error) {
+	var out ListInventoryConfigurationsResult
+	params := map[string]interface{}{}
+	params["inventory"] = nil
+	if continuationToken == "" {
+		params["continuation-token"] = nil
+	} else {
+		params["continuation-token"] = continuationToken
+	}
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketInventory API operation for Object Storage Service.
+//
+// Delete Bucket inventory information.
+//
+// bucketName the bucket name.
+//
+// strInventoryId the inventory id.
+//
+// error it's nil if no error, otherwise it's an error.
+//
+func (client Client) DeleteBucketInventory(bucketName, strInventoryId string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["inventory"] = nil
+	params["inventoryId"] = strInventoryId
+
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// SetBucketAsyncTask API operation for setting an async fetch task
+//
+// bucketName the bucket name.
+//
+// asynConf the async fetch task configuration
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) SetBucketAsyncTask(bucketName string, asynConf AsyncFetchTaskConfiguration, options ...Option) (AsyncFetchTaskResult, error) {
+	var out AsyncFetchTaskResult
+	params := map[string]interface{}{}
+	params["asyncFetch"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(asynConf)
+
+	if err != nil {
+		return out, err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := make(map[string]string)
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("POST", bucketName, params, headers, buffer, options...)
+
+	if err != nil {
+		return out, err
+	}
+
+	defer resp.Body.Close()
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// GetBucketAsyncTask API operation for getting async fetch task information
+//
+// bucketName the bucket name.
+//
+// taskID the task id returned by SetBucketAsyncTask
+//
+// error it's nil if success, otherwise it's an error.
+func (client Client) GetBucketAsyncTask(bucketName string, taskID string, options ...Option) (AsynFetchTaskInfo, error) {
+	var out AsynFetchTaskInfo
+	params := map[string]interface{}{}
+	params["asyncFetch"] = nil
+
+	headers := make(map[string]string)
+	headers[HTTPHeaderOssTaskID] = taskID
+	resp, err := client.do("GET", bucketName, params, headers, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// InitiateBucketWorm creates bucket worm Configuration
+// bucketName the bucket name.
+// retentionDays the retention period in days
+// error it's nil if no error, otherwise it's an error object.
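+//
+// A minimal sketch of the WORM flow, pairing InitiateBucketWorm with CompleteBucketWorm;
+// the bucket name and retention period are assumptions:
+//
+//    wormID, err := client.InitiateBucketWorm("example-bucket", 365)
+//    if err == nil {
+//        err = client.CompleteBucketWorm("example-bucket", wormID)
+//    }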
+// +func (client Client) InitiateBucketWorm(bucketName string, retentionDays int, options ...Option) (string, error) { + var initiateWormConf InitiateWormConfiguration + initiateWormConf.RetentionPeriodInDays = retentionDays + + var respHeader http.Header + isOptSet, _, _ := IsOptionSet(options, responseHeader) + if !isOptSet { + options = append(options, GetResponseHeader(&respHeader)) + } + + bs, err := xml.Marshal(initiateWormConf) + if err != nil { + return "", err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["worm"] = nil + + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respOpt, _ := FindOption(options, responseHeader, nil) + wormID := "" + err = CheckRespCode(resp.StatusCode, []int{http.StatusOK}) + if err == nil && respOpt != nil { + wormID = (respOpt.(*http.Header)).Get("x-oss-worm-id") + } + return wormID, err +} + +// AbortBucketWorm delete bucket worm Configuration +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) AbortBucketWorm(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["worm"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// CompleteBucketWorm complete bucket worm Configuration +// bucketName the bucket name. +// wormID the worm id +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) CompleteBucketWorm(bucketName string, wormID string, options ...Option) error { + params := map[string]interface{}{} + params["wormId"] = wormID + resp, err := client.do("POST", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// ExtendBucketWorm exetend bucket worm Configuration +// bucketName the bucket name. +// retentionDays the retention period in days +// wormID the worm id +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) ExtendBucketWorm(bucketName string, retentionDays int, wormID string, options ...Option) error { + var extendWormConf ExtendWormConfiguration + extendWormConf.RetentionPeriodInDays = retentionDays + + bs, err := xml.Marshal(extendWormConf) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["wormId"] = wormID + params["wormExtend"] = nil + + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketWorm get bucket worm Configuration +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. 
+// +func (client Client) GetBucketWorm(bucketName string, options ...Option) (WormConfiguration, error) { + var out WormConfiguration + params := map[string]interface{}{} + params["worm"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketTransferAcc set bucket transfer acceleration configuration +// bucketName the bucket name. +// accConf bucket transfer acceleration configuration +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketTransferAcc(bucketName string, accConf TransferAccConfiguration, options ...Option) error { + bs, err := xml.Marshal(accConf) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketTransferAcc get bucket transfer acceleration configuration +// bucketName the bucket name. +// accConf bucket transfer acceleration configuration +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketTransferAcc(bucketName string, options ...Option) (TransferAccConfiguration, error) { + var out TransferAccConfiguration + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketTransferAcc delete bucket transfer acceleration configuration +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketTransferAcc(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["transferAcceleration"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// PutBucketReplication put bucket replication configuration +// bucketName the bucket name. +// xmlBody the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) PutBucketReplication(bucketName string, xmlBody string, options ...Option) error { + buffer := new(bytes.Buffer) + buffer.Write([]byte(xmlBody)) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "add" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReplication get bucket replication configuration +// bucketName the bucket name. +// string the replication configuration. +// error it's nil if no error, otherwise it's an error object. 
+// +func (client Client) GetBucketReplication(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replication"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// DeleteBucketReplication delete bucket replication configuration +// bucketName the bucket name. +// ruleId the ID of the replication configuration. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketReplication(bucketName string, ruleId string, options ...Option) error { + replicationxml := ReplicationXML{} + replicationxml.ID = ruleId + + bs, err := xml.Marshal(replicationxml) + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["replication"] = nil + params["comp"] = "delete" + resp, err := client.do("POST", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReplicationLocation get the locations of the target bucket that can be copied to +// bucketName the bucket name. +// string the locations of the target bucket that can be copied to. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReplicationLocation(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replicationLocation"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// GetBucketReplicationProgress get the replication progress of bucket +// bucketName the bucket name. +// ruleId the ID of the replication configuration. +// string the replication progress of bucket. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReplicationProgress(bucketName string, ruleId string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["replicationProgress"] = nil + if ruleId != "" { + params["rule-id"] = ruleId + } + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// GetBucketCname get bucket's binding cname +// bucketName the bucket name. +// string the xml configuration of bucket. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketCname(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["cname"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) 
+ if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(data), err +} + +// LimitUploadSpeed set upload bandwidth limit speed,default is 0,unlimited +// upSpeed KB/s, 0 is unlimited,default is 0 +// error it's nil if success, otherwise failure +func (client Client) LimitUploadSpeed(upSpeed int) error { + if client.Config == nil { + return fmt.Errorf("client config is nil") + } + return client.Config.LimitUploadSpeed(upSpeed) +} + +// LimitDownloadSpeed set download bandwidth limit speed,default is 0,unlimited +// downSpeed KB/s, 0 is unlimited,default is 0 +// error it's nil if success, otherwise failure +func (client Client) LimitDownloadSpeed(downSpeed int) error { + if client.Config == nil { + return fmt.Errorf("client config is nil") + } + return client.Config.LimitDownloadSpeed(downSpeed) +} + +// UseCname sets the flag of using CName. By default it's false. +// +// isUseCname true: the endpoint has the CName, false: the endpoint does not have cname. Default is false. +// +func UseCname(isUseCname bool) ClientOption { + return func(client *Client) { + client.Config.IsCname = isUseCname + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// Timeout sets the HTTP timeout in seconds. +// +// connectTimeoutSec HTTP timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended) +// readWriteTimeout HTTP read or write's timeout in seconds. Default is 20 seconds. 0 means infinite. +// +func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption { + return func(client *Client) { + client.Config.HTTPTimeout.ConnectTimeout = + time.Second * time.Duration(connectTimeoutSec) + client.Config.HTTPTimeout.ReadWriteTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.HeaderTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.IdleConnTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.LongTimeout = + time.Second * time.Duration(readWriteTimeout*10) + } +} + +// SecurityToken sets the temporary user's SecurityToken. +// +// token STS token +// +func SecurityToken(token string) ClientOption { + return func(client *Client) { + client.Config.SecurityToken = strings.TrimSpace(token) + } +} + +// EnableMD5 enables MD5 validation. +// +// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation. +// +func EnableMD5(isEnableMD5 bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableMD5 = isEnableMD5 + } +} + +// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB. +// +// threshold the memory threshold in bytes. When the uploaded content is more than 16MB, the temp file is used for computing the MD5. +// +func MD5ThresholdCalcInMemory(threshold int64) ClientOption { + return func(client *Client) { + client.Config.MD5Threshold = threshold + } +} + +// EnableCRC enables the CRC checksum. Default is true. +// +// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum. +// +func EnableCRC(isEnableCRC bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableCRC = isEnableCRC + } +} + +// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2). +// +// userAgent the user agent string. 
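+//
+// ClientOption values such as UserAgent, Timeout or UseCname are applied when
+// the client is constructed. Minimal sketch (illustrative only, assuming the
+// package-level New constructor defined earlier in this file and placeholder
+// endpoint/credentials):
+//
+//    client, err := New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyID>", "<accessKeySecret>",
+//        Timeout(10, 60),         // connect / read-write timeouts in seconds
+//        UserAgent("my-app/1.0"), // overrides the default UA string
+//    )
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = client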
+// +func UserAgent(userAgent string) ClientOption { + return func(client *Client) { + client.Config.UserAgent = userAgent + client.Config.UserSetUa = true + } +} + +// Proxy sets the proxy (optional). The default is not using proxy. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// +func Proxy(proxyHost string) ClientOption { + return func(client *Client) { + client.Config.IsUseProxy = true + client.Config.ProxyHost = proxyHost + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// AuthProxy sets the proxy information with user name and password. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// proxyUser the proxy user name. +// proxyPassword the proxy password. +// +func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption { + return func(client *Client) { + client.Config.IsUseProxy = true + client.Config.ProxyHost = proxyHost + client.Config.IsAuthProxy = true + client.Config.ProxyUser = proxyUser + client.Config.ProxyPassword = proxyPassword + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// +// HTTPClient sets the http.Client in use to the one passed in +// +func HTTPClient(HTTPClient *http.Client) ClientOption { + return func(client *Client) { + client.HTTPClient = HTTPClient + } +} + +// +// SetLogLevel sets the oss sdk log level +// +func SetLogLevel(LogLevel int) ClientOption { + return func(client *Client) { + client.Config.LogLevel = LogLevel + } +} + +// +// SetLogger sets the oss sdk logger +// +func SetLogger(Logger *log.Logger) ClientOption { + return func(client *Client) { + client.Config.Logger = Logger + } +} + +// SetCredentialsProvider sets funciton for get the user's ak +func SetCredentialsProvider(provider CredentialsProvider) ClientOption { + return func(client *Client) { + client.Config.CredentialsProvider = provider + } +} + +// SetLocalAddr sets funciton for local addr +func SetLocalAddr(localAddr net.Addr) ClientOption { + return func(client *Client) { + client.Config.LocalAddr = localAddr + } +} + +// AuthVersion sets auth version: v1 or v2 signature which oss_server needed +func AuthVersion(authVersion AuthVersionType) ClientOption { + return func(client *Client) { + client.Config.AuthVersion = authVersion + } +} + +// AdditionalHeaders sets special http headers needed to be signed +func AdditionalHeaders(headers []string) ClientOption { + return func(client *Client) { + client.Config.AdditionalHeaders = headers + } +} + +// only effective from go1.7 onward,RedirectEnabled set http redirect enabled or not +func RedirectEnabled(enabled bool) ClientOption { + return func(client *Client) { + client.Config.RedirectEnabled = enabled + } +} + +// skip verifying tls certificate file +func InsecureSkipVerify(enabled bool) ClientOption { + return func(client *Client) { + client.Config.InsecureSkipVerify = enabled + } +} + +// Private +func (client Client) do(method, bucketName string, params map[string]interface{}, + headers map[string]string, data io.Reader, options ...Option) (*Response, error) { + err := CheckBucketName(bucketName) + if len(bucketName) > 0 && err != nil { + return nil, err + } + + // option headers + addHeaders := make(map[string]string) + err = handleOptions(addHeaders, options) + if err != nil { + return nil, err + } + + // merge header + if headers == nil { + headers = make(map[string]string) + } + + for k, v := range addHeaders { + if _, 
ok := headers[k]; !ok { + headers[k] = v + } + } + + resp, err := client.Conn.Do(method, bucketName, "", params, headers, data, 0, nil) + + // get response header + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + return resp, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go new file mode 100644 index 000000000000..d43527255feb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go @@ -0,0 +1,207 @@ +package oss + +import ( + "bytes" + "fmt" + "log" + "net" + "os" + "time" +) + +// Define the level of the output log +const ( + LogOff = iota + Error + Warn + Info + Debug +) + +// LogTag Tag for each level of log +var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"} + +// HTTPTimeout defines HTTP timeout. +type HTTPTimeout struct { + ConnectTimeout time.Duration + ReadWriteTimeout time.Duration + HeaderTimeout time.Duration + LongTimeout time.Duration + IdleConnTimeout time.Duration +} + +// HTTPMaxConns defines max idle connections and max idle connections per host +type HTTPMaxConns struct { + MaxIdleConns int + MaxIdleConnsPerHost int +} + +// CredentialInf is interface for get AccessKeyID,AccessKeySecret,SecurityToken +type Credentials interface { + GetAccessKeyID() string + GetAccessKeySecret() string + GetSecurityToken() string +} + +// CredentialInfBuild is interface for get CredentialInf +type CredentialsProvider interface { + GetCredentials() Credentials +} + +type defaultCredentials struct { + config *Config +} + +func (defCre *defaultCredentials) GetAccessKeyID() string { + return defCre.config.AccessKeyID +} + +func (defCre *defaultCredentials) GetAccessKeySecret() string { + return defCre.config.AccessKeySecret +} + +func (defCre *defaultCredentials) GetSecurityToken() string { + return defCre.config.SecurityToken +} + +type defaultCredentialsProvider struct { + config *Config +} + +func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials { + return &defaultCredentials{config: defBuild.config} +} + +// Config defines oss configuration +type Config struct { + Endpoint string // OSS endpoint + AccessKeyID string // AccessId + AccessKeySecret string // AccessKey + RetryTimes uint // Retry count by default it's 5. + UserAgent string // SDK name/version/system information + IsDebug bool // Enable debug mode. Default is false. + Timeout uint // Timeout in seconds. By default it's 60. + SecurityToken string // STS Token + IsCname bool // If cname is in the endpoint. + HTTPTimeout HTTPTimeout // HTTP timeout + HTTPMaxConns HTTPMaxConns // Http max connections + IsUseProxy bool // Flag of using proxy. + ProxyHost string // Flag of using proxy host. + IsAuthProxy bool // Flag of needing authentication. + ProxyUser string // Proxy user + ProxyPassword string // Proxy password + IsEnableMD5 bool // Flag of enabling MD5 for upload. + MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used. + IsEnableCRC bool // Flag of enabling CRC for upload. 
+ LogLevel int // Log level + Logger *log.Logger // For write log + UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited + UploadLimiter *OssLimiter // Bandwidth limit reader for upload + DownloadLimitSpeed int // Download limit speed:KB/s, 0 is unlimited + DownloadLimiter *OssLimiter // Bandwidth limit reader for download + CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken + LocalAddr net.Addr // local client host info + UserSetUa bool // UserAgent is set by user or not + AuthVersion AuthVersionType // v1 or v2 signature,default is v1 + AdditionalHeaders []string // special http headers needed to be sign + RedirectEnabled bool // only effective from go1.7 onward, enable http redirect or not + InsecureSkipVerify bool // for https, Whether to skip verifying the server certificate file +} + +// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0 +func (config *Config) LimitUploadSpeed(uploadSpeed int) error { + if uploadSpeed < 0 { + return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0") + } else if uploadSpeed == 0 { + config.UploadLimitSpeed = 0 + config.UploadLimiter = nil + return nil + } + + var err error + config.UploadLimiter, err = GetOssLimiter(uploadSpeed) + if err == nil { + config.UploadLimitSpeed = uploadSpeed + } + return err +} + +// LimitDownLoadSpeed downloadSpeed:KB/s, 0 is unlimited,default is 0 +func (config *Config) LimitDownloadSpeed(downloadSpeed int) error { + if downloadSpeed < 0 { + return fmt.Errorf("invalid argument, the value of downloadSpeed is less than 0") + } else if downloadSpeed == 0 { + config.DownloadLimitSpeed = 0 + config.DownloadLimiter = nil + return nil + } + + var err error + config.DownloadLimiter, err = GetOssLimiter(downloadSpeed) + if err == nil { + config.DownloadLimitSpeed = downloadSpeed + } + return err +} + +// WriteLog output log function +func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) { + if config.LogLevel < LogLevel || config.Logger == nil { + return + } + + var logBuffer bytes.Buffer + logBuffer.WriteString(LogTag[LogLevel-1]) + logBuffer.WriteString(fmt.Sprintf(format, a...)) + config.Logger.Printf("%s", logBuffer.String()) +} + +// for get Credentials +func (config *Config) GetCredentials() Credentials { + return config.CredentialsProvider.GetCredentials() +} + +// getDefaultOssConfig gets the default configuration. 
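+// It wires a defaultCredentialsProvider backed by the Config itself; callers can
+// swap it out through the SetCredentialsProvider client option. Minimal sketch of
+// a static provider (illustrative only, not part of the vendored source):
+//
+//    type staticCredentials struct{ id, secret, token string }
+//
+//    func (c staticCredentials) GetAccessKeyID() string     { return c.id }
+//    func (c staticCredentials) GetAccessKeySecret() string { return c.secret }
+//    func (c staticCredentials) GetSecurityToken() string   { return c.token }
+//
+//    type staticProvider struct{ creds staticCredentials }
+//
+//    func (p staticProvider) GetCredentials() Credentials { return p.creds }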
+func getDefaultOssConfig() *Config { + config := Config{} + + config.Endpoint = "" + config.AccessKeyID = "" + config.AccessKeySecret = "" + config.RetryTimes = 5 + config.IsDebug = false + config.UserAgent = userAgent() + config.Timeout = 60 // Seconds + config.SecurityToken = "" + config.IsCname = false + + config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s + config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s + config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s + config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s + config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s + config.HTTPMaxConns.MaxIdleConns = 100 + config.HTTPMaxConns.MaxIdleConnsPerHost = 100 + + config.IsUseProxy = false + config.ProxyHost = "" + config.IsAuthProxy = false + config.ProxyUser = "" + config.ProxyPassword = "" + + config.MD5Threshold = 16 * 1024 * 1024 // 16MB + config.IsEnableMD5 = false + config.IsEnableCRC = true + + config.LogLevel = LogOff + config.Logger = log.New(os.Stdout, "", log.LstdFlags) + + provider := &defaultCredentialsProvider{config: &config} + config.CredentialsProvider = provider + + config.AuthVersion = AuthV1 + config.RedirectEnabled = true + config.InsecureSkipVerify = false + + return &config +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go new file mode 100644 index 000000000000..cf2efee47a09 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go @@ -0,0 +1,852 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// Conn defines OSS Conn +type Conn struct { + config *Config + url *urlMaker + client *http.Client +} + +var signKeyList = []string{"acl", "uploads", "location", "cors", + "logging", "website", "referer", "lifecycle", + "delete", "append", "tagging", "objectMeta", + "uploadId", "partNumber", "security-token", + "position", "img", "style", "styleName", + "replication", "replicationProgress", + "replicationLocation", "cname", "bucketInfo", + "comp", "qos", "live", "status", "vod", + "startTime", "endTime", "symlink", + "x-oss-process", "response-content-type", "x-oss-traffic-limit", + "response-content-language", "response-expires", + "response-cache-control", "response-content-disposition", + "response-content-encoding", "udf", "udfName", "udfImage", + "udfId", "udfImageDesc", "udfApplication", "comp", + "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo", + "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment", + "x-oss-request-payer", "sequential", + "inventory", "inventoryId", "continuation-token", "asyncFetch", + "worm", "wormId", "wormExtend", "withHashContext", + "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256", + "x-oss-hash-ctx", "x-oss-md5-ctx", "transferAcceleration", + "regionList", +} + +// init initializes Conn +func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error { + if client == nil { + // New transport + transport := newTransport(conn, config) + + // Proxy + if conn.config.IsUseProxy { + proxyURL, err := url.Parse(config.ProxyHost) + if err != nil { + return err + } + if config.IsAuthProxy { + if config.ProxyPassword != "" { + proxyURL.User = url.UserPassword(config.ProxyUser, config.ProxyPassword) + } else { + proxyURL.User = 
url.User(config.ProxyUser) + } + } + transport.Proxy = http.ProxyURL(proxyURL) + } + client = &http.Client{Transport: transport} + if !config.RedirectEnabled { + disableHTTPRedirect(client) + } + } + + conn.config = config + conn.url = urlMaker + conn.client = client + + return nil +} + +// Do sends request and returns the response +func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + urlParams := conn.getURLParams(params) + subResource := conn.getSubResource(params) + uri := conn.url.getURL(bucketName, objectName, urlParams) + resource := conn.getResource(bucketName, objectName, subResource) + return conn.doRequest(method, uri, resource, headers, data, initCRC, listener) +} + +// DoURL sends the request with signed URL and returns the response result. +func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + // Get URI from signedURL + uri, err := url.ParseRequestURI(signedURL) + if err != nil { + return nil, err + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) getURLParams(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if params[k] != nil && params[k].(string) != "" { + buf.WriteString("=" + strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1)) + } + } + + return buf.String() +} + +func (conn Conn) getSubResource(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + signParams := make(map[string]string) + for k := range params { + if 
conn.config.AuthVersion == AuthV2 { + encodedKey := url.QueryEscape(k) + keys = append(keys, encodedKey) + if params[k] != nil && params[k] != "" { + signParams[encodedKey] = strings.Replace(url.QueryEscape(params[k].(string)), "+", "%20", -1) + } + } else if conn.isParamSign(k) { + keys = append(keys, k) + if params[k] != nil { + signParams[k] = params[k].(string) + } + } + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if _, ok := signParams[k]; ok { + if signParams[k] != "" { + buf.WriteString("=" + signParams[k]) + } + } + } + return buf.String() +} + +func (conn Conn) isParamSign(paramKey string) bool { + for _, k := range signKeyList { + if paramKey == k { + return true + } + } + return false +} + +// getResource gets canonicalized resource +func (conn Conn) getResource(bucketName, objectName, subResource string) string { + if subResource != "" { + subResource = "?" + subResource + } + if bucketName == "" { + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/") + subResource + } + return fmt.Sprintf("/%s%s", bucketName, subResource) + } + if conn.config.AuthVersion == AuthV2 { + return url.QueryEscape("/"+bucketName+"/") + strings.Replace(url.QueryEscape(objectName), "+", "%20", -1) + subResource + } + return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) +} + +func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + method = strings.ToUpper(method) + req := &http.Request{ + Method: method, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(HTTPHeaderDate, date) + req.Header.Set(HTTPHeaderHost, req.Host) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken()) + } + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + conn.signHeader(req, canonicalizedResource) + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + conn.config.WriteLog(Debug, "[Resp:%p]http error:%s\n", req, err.Error()) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) 
signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string { + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + Header: make(http.Header), + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10)) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureVersion] = "OSS2" + params[HTTPParamExpiresV2] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyIDV2] = conn.config.AccessKeyID + additionalList, _ := conn.getAdditionalHeaderKeys(req) + if len(additionalList) > 0 { + params[HTTPParamAdditionalHeadersV2] = strings.Join(additionalList, ";") + } + } + + subResource := conn.getSubResource(params) + canonicalizedResource := conn.getResource(bucketName, objectName, subResource) + signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + + if conn.config.AuthVersion == AuthV1 { + params[HTTPParamExpires] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + params[HTTPParamSignature] = signedStr + } else if conn.config.AuthVersion == AuthV2 { + params[HTTPParamSignatureV2] = signedStr + } + urlParams := conn.getURLParams(params) + return conn.url.getSignURL(bucketName, objectName, urlParams) +} + +func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string { + params := map[string]interface{}{} + if playlistName != "" { + params[HTTPParamPlaylistName] = playlistName + } + expireStr := strconv.FormatInt(expiration, 10) + params[HTTPParamExpires] = expireStr + + akIf := conn.config.GetCredentials() + if akIf.GetAccessKeyID() != "" { + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params) + params[HTTPParamSignature] = signedStr + } + + urlParams := conn.getURLParams(params) + return conn.url.getSignRtmpURL(bucketName, channelName, urlParams) +} + +// handleBody handles request body +func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64, + listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) { + var file *os.File + var crc hash.Hash64 + reader := body + readerLen, err := GetReaderLen(reader) + if err == nil { + req.ContentLength = readerLen + } + req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) + + // MD5 + if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { + md5 := "" + reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold) + req.Header.Set(HTTPHeaderContentMD5, md5) + } + + // CRC + if reader != nil && conn.config.IsEnableCRC { + crc = NewCRC(CrcTable(), initCRC) + reader = TeeReader(reader, crc, req.ContentLength, listener, tracker) + } + + // HTTP body + rc, ok := 
reader.(io.ReadCloser) + if !ok && reader != nil { + rc = ioutil.NopCloser(reader) + } + + if conn.isUploadLimitReq(req) { + limitReader := &LimitSpeedReader{ + reader: rc, + ossLimiter: conn.config.UploadLimiter, + } + req.Body = limitReader + } else { + req.Body = rc + } + return file, crc +} + +// isUploadLimitReq: judge limit upload speed or not +func (conn Conn) isUploadLimitReq(req *http.Request) bool { + if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil { + return false + } + + if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" { + if req.ContentLength > 0 { + return true + } + } + return false +} + +func tryGetFileSize(f *os.File) int64 { + fInfo, _ := f.Stat() + return fInfo.Size() +} + +// handleResponse handles response +func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) { + var cliCRC uint64 + var srvCRC uint64 + + statusCode := resp.StatusCode + if statusCode/100 != 2 { + if statusCode >= 400 && statusCode <= 505 { + // 4xx and 5xx indicate that the operation has error occurred + var respBody []byte + respBody, err := readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + err = ServiceError{ + StatusCode: statusCode, + RequestID: resp.Header.Get(HTTPHeaderOssRequestID), + } + } else { + // Response contains storage service error object, unmarshal + srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, + resp.Header.Get(HTTPHeaderOssRequestID)) + if errIn != nil { // error unmarshaling the error response + err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID)) + } else { + err = srvErr + } + } + + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body + }, err + } else if statusCode >= 300 && statusCode <= 307 { + // OSS use 3xx, but response has no body + err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status) + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: resp.Body, + }, err + } else { + // (0,300) [308,400) [506,) + // Other extended http StatusCode + var respBody []byte + respBody, err := readResponseBody(resp) + if err != nil { + return &Response{StatusCode: resp.StatusCode, Headers: resp.Header, Body: ioutil.NopCloser(bytes.NewReader(respBody))}, err + } + + if len(respBody) == 0 { + err = ServiceError{ + StatusCode: statusCode, + RequestID: resp.Header.Get(HTTPHeaderOssRequestID), + } + } else { + // Response contains storage service error object, unmarshal + srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, + resp.Header.Get(HTTPHeaderOssRequestID)) + if errIn != nil { // error unmarshaling the error response + err = fmt.Errorf("unkown response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID)) + } else { + err = srvErr + } + } + + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body + }, err + } + } else { + if conn.config.IsEnableCRC && crc != nil { + cliCRC = crc.Sum64() + } + srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64) + + realBody := resp.Body + if conn.isDownloadLimitResponse(resp) { + limitReader := &LimitSpeedReader{ + reader: realBody, + ossLimiter: conn.config.DownloadLimiter, + } + realBody = limitReader + } + + // 2xx, 
successful + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: realBody, + ClientCRC: cliCRC, + ServerCRC: srvCRC, + }, nil + } +} + +// isUploadLimitReq: judge limit upload speed or not +func (conn Conn) isDownloadLimitResponse(resp *http.Response) bool { + if resp == nil || conn.config.DownloadLimitSpeed == 0 || conn.config.DownloadLimiter == nil { + return false + } + + if strings.EqualFold(resp.Request.Method, "GET") { + return true + } + return false +} + +// LoggerHTTPReq Print the header information of the http request +func (conn Conn) LoggerHTTPReq(req *http.Request) { + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method)) + logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host)) + logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path)) + logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + + for k, v := range req.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +// LoggerHTTPResp Print Response to http request +func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) { + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + for k, v := range resp.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) { + if contentLen == 0 || contentLen > md5Threshold { + // Huge body, use temporary file + tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix) + if tempFile != nil { + io.Copy(tempFile, body) + tempFile.Seek(0, os.SEEK_SET) + md5 := md5.New() + io.Copy(md5, tempFile) + sum := md5.Sum(nil) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + tempFile.Seek(0, os.SEEK_SET) + reader = tempFile + } + } else { + // Small body, use memory + buf, _ := ioutil.ReadAll(body) + sum := md5.Sum(buf) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + reader = bytes.NewReader(buf) + } + return +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) { + var storageErr ServiceError + + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + storageErr.RawMessage = string(body) + return storageErr, nil +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func jsonUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// timeoutConn handles HTTP timeout +type 
timeoutConn struct { + conn net.Conn + timeout time.Duration + longTimeout time.Duration +} + +func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { + conn.SetReadDeadline(time.Now().Add(longTimeout)) + return &timeoutConn{ + conn: conn, + timeout: timeout, + longTimeout: longTimeout, + } +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + c.SetReadDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Read(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + c.SetWriteDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Write(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Close() error { + return c.conn.Close() +} + +func (c *timeoutConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *timeoutConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *timeoutConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *timeoutConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *timeoutConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +// UrlMaker builds URL and resource +const ( + urlTypeCname = 1 + urlTypeIP = 2 + urlTypeAliyun = 3 +) + +type urlMaker struct { + Scheme string // HTTP or HTTPS + NetLoc string // Host or IP + Type int // 1 CNAME, 2 IP, 3 ALIYUN + IsProxy bool // Proxy +} + +// Init parses endpoint +func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) error { + if strings.HasPrefix(endpoint, "http://") { + um.Scheme = "http" + um.NetLoc = endpoint[len("http://"):] + } else if strings.HasPrefix(endpoint, "https://") { + um.Scheme = "https" + um.NetLoc = endpoint[len("https://"):] + } else { + um.Scheme = "http" + um.NetLoc = endpoint + } + + //use url.Parse() to get real host + strUrl := um.Scheme + "://" + um.NetLoc + url, err := url.Parse(strUrl) + if err != nil { + return err + } + + um.NetLoc = url.Host + host, _, err := net.SplitHostPort(um.NetLoc) + if err != nil { + host = um.NetLoc + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + } + + ip := net.ParseIP(host) + if ip != nil { + um.Type = urlTypeIP + } else if isCname { + um.Type = urlTypeCname + } else { + um.Type = urlTypeAliyun + } + um.IsProxy = isProxy + + return nil +} + +// getURL gets URL +func (um urlMaker) getURL(bucket, object, params string) *url.URL { + host, path := um.buildURL(bucket, object) + addr := "" + if params == "" { + addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path) + } else { + addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) + } + uri, _ := url.ParseRequestURI(addr) + return uri +} + +// getSignURL gets sign URL +func (um urlMaker) getSignURL(bucket, object, params string) string { + host, path := um.buildURL(bucket, object) + return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) +} + +// getSignRtmpURL Build Sign Rtmp URL +func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string { + host, path := um.buildURL(bucket, "live") + + channelName = url.QueryEscape(channelName) + channelName = strings.Replace(channelName, "+", "%20", -1) + + return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params) +} + +// buildURL builds URL +func (um urlMaker) buildURL(bucket, object string) (string, string) { + var host = "" + var path = "" + + object = 
url.QueryEscape(object) + object = strings.Replace(object, "+", "%20", -1) + + if um.Type == urlTypeCname { + host = um.NetLoc + path = "/" + object + } else if um.Type == urlTypeIP { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } else { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = bucket + "." + um.NetLoc + path = "/" + object + } + } + + return host, path +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go new file mode 100644 index 000000000000..47e1b0f10f1a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go @@ -0,0 +1,258 @@ +package oss + +import "os" + +// ACLType bucket/object ACL +type ACLType string + +const ( + // ACLPrivate definition : private read and write + ACLPrivate ACLType = "private" + + // ACLPublicRead definition : public read and private write + ACLPublicRead ACLType = "public-read" + + // ACLPublicReadWrite definition : public read and public write + ACLPublicReadWrite ACLType = "public-read-write" + + // ACLDefault Object. It's only applicable for object. + ACLDefault ACLType = "default" +) + +// bucket versioning status +type VersioningStatus string + +const ( + // Versioning Status definition: Enabled + VersionEnabled VersioningStatus = "Enabled" + + // Versioning Status definition: Suspended + VersionSuspended VersioningStatus = "Suspended" +) + +// MetadataDirectiveType specifying whether use the metadata of source object when copying object. +type MetadataDirectiveType string + +const ( + // MetaCopy the target object's metadata is copied from the source one + MetaCopy MetadataDirectiveType = "COPY" + + // MetaReplace the target object's metadata is created as part of the copy request (not same as the source one) + MetaReplace MetadataDirectiveType = "REPLACE" +) + +// TaggingDirectiveType specifying whether use the tagging of source object when copying object. 
+type TaggingDirectiveType string + +const ( + // TaggingCopy the target object's tagging is copied from the source one + TaggingCopy TaggingDirectiveType = "COPY" + + // TaggingReplace the target object's tagging is created as part of the copy request (not same as the source one) + TaggingReplace TaggingDirectiveType = "REPLACE" +) + +// AlgorithmType specifying the server side encryption algorithm name +type AlgorithmType string + +const ( + KMSAlgorithm AlgorithmType = "KMS" + AESAlgorithm AlgorithmType = "AES256" + SM4Algorithm AlgorithmType = "SM4" +) + +// StorageClassType bucket storage type +type StorageClassType string + +const ( + // StorageStandard standard + StorageStandard StorageClassType = "Standard" + + // StorageIA infrequent access + StorageIA StorageClassType = "IA" + + // StorageArchive archive + StorageArchive StorageClassType = "Archive" + + // StorageColdArchive cold archive + StorageColdArchive StorageClassType = "ColdArchive" +) + +//RedundancyType bucket data Redundancy type +type DataRedundancyType string + +const ( + // RedundancyLRS Local redundancy, default value + RedundancyLRS DataRedundancyType = "LRS" + + // RedundancyZRS Same city redundancy + RedundancyZRS DataRedundancyType = "ZRS" +) + +//ObjecthashFuncType +type ObjecthashFuncType string + +const ( + HashFuncSha1 ObjecthashFuncType = "SHA-1" + HashFuncSha256 ObjecthashFuncType = "SHA-256" +) + +// PayerType the type of request payer +type PayerType string + +const ( + // Requester the requester who send the request + Requester PayerType = "Requester" + + // BucketOwner the requester who send the request + BucketOwner PayerType = "BucketOwner" +) + +//RestoreMode the restore mode for coldArchive object +type RestoreMode string + +const ( + //RestoreExpedited object will be restored in 1 hour + RestoreExpedited RestoreMode = "Expedited" + + //RestoreStandard object will be restored in 2-5 hours + RestoreStandard RestoreMode = "Standard" + + //RestoreBulk object will be restored in 5-10 hours + RestoreBulk RestoreMode = "Bulk" +) + +// HTTPMethod HTTP request method +type HTTPMethod string + +const ( + // HTTPGet HTTP GET + HTTPGet HTTPMethod = "GET" + + // HTTPPut HTTP PUT + HTTPPut HTTPMethod = "PUT" + + // HTTPHead HTTP HEAD + HTTPHead HTTPMethod = "HEAD" + + // HTTPPost HTTP POST + HTTPPost HTTPMethod = "POST" + + // HTTPDelete HTTP DELETE + HTTPDelete HTTPMethod = "DELETE" +) + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderDate = "Date" + HTTPHeaderEtag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderLastModified = "Last-Modified" + HTTPHeaderRange = "Range" + HTTPHeaderLocation = "Location" + HTTPHeaderOrigin = "Origin" + HTTPHeaderServer = "Server" + HTTPHeaderUserAgent = "User-Agent" + HTTPHeaderIfModifiedSince = "If-Modified-Since" + HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" + HTTPHeaderIfMatch = "If-Match" + HTTPHeaderIfNoneMatch = "If-None-Match" + HTTPHeaderACReqMethod = "Access-Control-Request-Method" + HTTPHeaderACReqHeaders = "Access-Control-Request-Headers" + + HTTPHeaderOssACL = "X-Oss-Acl" + HTTPHeaderOssMetaPrefix = "X-Oss-Meta-" + 
HTTPHeaderOssObjectACL = "X-Oss-Object-Acl" + HTTPHeaderOssSecurityToken = "X-Oss-Security-Token" + HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HTTPHeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption" + HTTPHeaderSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm" + HTTPHeaderSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key" + HTTPHeaderSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5" + HTTPHeaderOssCopySource = "X-Oss-Copy-Source" + HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HTTPHeaderOssRequestID = "X-Oss-Request-Id" + HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HTTPHeaderOssStorageClass = "X-Oss-Storage-Class" + HTTPHeaderOssCallback = "X-Oss-Callback" + HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var" + HTTPHeaderOssRequester = "X-Oss-Request-Payer" + HTTPHeaderOssTagging = "X-Oss-Tagging" + HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit" + HTTPHeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite" + HTTPHeaderOssRangeBehavior = "X-Oss-Range-Behavior" + HTTPHeaderOssTaskID = "X-Oss-Task-Id" + HTTPHeaderOssHashCtx = "X-Oss-Hash-Ctx" + HTTPHeaderOssMd5Ctx = "X-Oss-Md5-Ctx" + HTTPHeaderAllowSameActionOverLap = "X-Oss-Allow-Same-Action-Overlap" +) + +// HTTP Param +const ( + HTTPParamExpires = "Expires" + HTTPParamAccessKeyID = "OSSAccessKeyId" + HTTPParamSignature = "Signature" + HTTPParamSecurityToken = "security-token" + HTTPParamPlaylistName = "playlistName" + + HTTPParamSignatureVersion = "x-oss-signature-version" + HTTPParamExpiresV2 = "x-oss-expires" + HTTPParamAccessKeyIDV2 = "x-oss-access-key-id" + HTTPParamSignatureV2 = "x-oss-signature" + HTTPParamAdditionalHeadersV2 = "x-oss-additional-headers" +) + +// Other constants +const ( + MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB + MinPartSize = 100 * 1024 // Min part size, 100KB + + FilePermMode = os.FileMode(0664) // Default file permission + + TempFilePrefix = "oss-go-temp-" // Temp file prefix + TempFileSuffix = ".temp" // Temp file suffix + + CheckpointFileSuffix = ".cp" // Checkpoint file suffix + + NullVersion = "null" + + Version = "v2.2.2" // Go SDK version +) + +// FrameType +const ( + DataFrameType = 8388609 + ContinuousFrameType = 8388612 + EndFrameType = 8388613 + MetaEndFrameCSVType = 8388614 + MetaEndFrameJSONType = 8388615 +) + +// AuthVersion the version of auth +type AuthVersionType string + +const ( + // AuthV1 v1 + AuthV1 AuthVersionType = "v1" + // AuthV2 v2 + AuthV2 AuthVersionType = "v2" +) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go new file mode 100644 index 000000000000..c96694f2850d --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go @@ -0,0 +1,123 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// digest represents the partial evaluation of a checksum. 
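+// NewCRC and CRC64Combine below are what the multipart download path relies on
+// for integrity checks: each part hashes its own byte range, and the per-part
+// values are stitched into the checksum of the whole object (see
+// combineCRCInParts in download.go). Minimal sketch (illustrative only;
+// CrcTable is the package's CRC-64/ECMA table helper referenced elsewhere in
+// this SDK):
+//
+//    partA := []byte("hello ")
+//    partB := []byte("world")
+//    crcA := NewCRC(CrcTable(), 0)
+//    crcA.Write(partA)
+//    crcB := NewCRC(CrcTable(), 0)
+//    crcB.Write(partB)
+//    whole := CRC64Combine(crcA.Sum64(), crcB.Sum64(), uint64(len(partB)))
+//    // `whole` equals the CRC-64 of "hello world".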
+type digest struct { + crc uint64 + tab *crc64.Table +} + +// NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } + +// Size returns the number of bytes sum will return. +func (d *digest) Size() int { return crc64.Size } + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) BlockSize() int { return 1 } + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { d.crc = 0 } + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *digest) Sum64() uint64 { return d.crc } + +// Sum returns hash value. +func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 000000000000..90c1b633d9ed --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,567 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" + "time" +) + +// DownloadFile downloads files with multipart download. +// +// objectKey the object key. +// filePath the local file to download from objectKey in OSS. 
+// partSize the part size in bytes. +// options object's constraints, check out GetObject for the reference. +// +// error it's nil when the call succeeds, otherwise it's an error object. +// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + uRange, err := GetRangeConfig(options) + if err != nil { + return err + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, strVersionId, filePath) + if cpFilePath != "" { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange) + } + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, versionId, destFile string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + absPath, _ := filepath.Abs(destFile) + cpFileName := getCpFileName(src, absPath, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// downloadWorkerArg is download worker's parameters +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// downloadPartHook is hook for test +type downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. +type defaultDownloadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// downloadWorker +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // Resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + + var respHeader http.Header + opts := make([]Option, len(arg.options)+3) + // Append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p, GetResponseHeader(&respHeader)) + + rd, err := arg.bucket.GetObject(arg.key, opts...) 
+ if err != nil { + failed <- err + break + } + defer rd.Close() + + var crcCalc hash.Hash64 + if arg.enableCRC { + crcCalc = crc64.New(CrcTable()) + contentLen := part.End - part.Start + 1 + rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil)) + } + defer rd.Close() + + select { + case <-die: + return + default: + } + + fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode) + if err != nil { + failed <- err + break + } + + _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET) + if err != nil { + fd.Close() + failed <- err + break + } + + startT := time.Now().UnixNano() / 1000 / 1000 / 1000 + _, err = io.Copy(fd, rd) + endT := time.Now().UnixNano() / 1000 / 1000 / 1000 + if err != nil { + arg.bucket.Client.Config.WriteLog(Debug, "download part error,cost:%d second,part number:%d,request id:%s,error:%s.\n", endT-startT, part.Index, GetRequestId(respHeader), err.Error()) + fd.Close() + failed <- err + break + } + + if arg.enableCRC { + part.CRC64 = crcCalc.Sum64() + } + + fd.Close() + results <- part + } +} + +// downloadScheduler +func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// downloadPart defines download part +type downloadPart struct { + Index int // Part number, starting from 0 + Start int64 // Start index + End int64 // End index + Offset int64 // Offset + CRC64 uint64 // CRC check value of part +} + +// getDownloadParts gets download parts +func getDownloadParts(objectSize, partSize int64, uRange *UnpackedRange) []downloadPart { + parts := []downloadPart{} + part := downloadPart{} + i := 0 + start, end := AdjustRange(uRange, objectSize) + for offset := start; offset < end; offset += partSize { + part.Index = i + part.Start = offset + part.End = GetPartEnd(offset, end, partSize) + part.Offset = start + part.CRC64 = 0 + parts = append(parts, part) + i++ + } + return parts +} + +// getObjectBytes gets object bytes length +func getObjectBytes(parts []downloadPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// combineCRCInParts caculates the total CRC of continuous parts +func combineCRCInParts(dps []downloadPart) uint64 { + if dps == nil || len(dps) == 0 { + return 0 + } + + crc := dps[0].CRC64 + for i := 1; i < len(dps); i++ { + crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1)) + } + + return crc +} + +// downloadFile downloads file concurrently without checkpoint. +func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // If the file does not exist, create one. If exists, the download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) 
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
+	jobs := make(chan downloadPart, len(parts))
+	results := make(chan downloadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getObjectBytes(parts)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+	publishProgress(listener, event)
+
+	// Start the download workers
+	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
+	for w := 1; w <= routines; w++ {
+		go downloadWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Download parts concurrently
+	go downloadScheduler(jobs, parts)
+
+	// Waiting for parts download finished
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			downBytes := (part.End - part.Start + 1)
+			completedBytes += downBytes
+			parts[part.Index].CRC64 = part.CRC64
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+	publishProgress(listener, event)
+
+	if enableCRC {
+		actualCRC := combineCRCInParts(parts)
+		err = CheckDownloadCRC(actualCRC, expectedCRC)
+		if err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// ----- Concurrent download with checkpoint -----
+
+const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
+
+type downloadCheckpoint struct {
+	Magic     string         // Magic
+	MD5       string         // Checkpoint content MD5
+	FilePath  string         // Local file
+	Object    string         // Key
+	ObjStat   objectStat     // Object status
+	Parts     []downloadPart // All download parts
+	PartStat  []bool         // Parts' download status
+	Start     int64          // Start point of the file
+	End       int64          // End point of the file
+	enableCRC bool           // Whether has CRC check
+	CRC       uint64         // CRC check value
+}
+
+type objectStat struct {
+	Size         int64  // Object size
+	LastModified string // Last modified time
+	Etag         string // Etag
+}
+
+// isValid checks whether the checkpoint data is valid. It returns true when the checkpoint is valid and the object has not been updated.
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *UnpackedRange) (bool, error) {
+	// Compare the CP's Magic and the MD5
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the object size, last modified time and etag
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	// Check the download range
+	if uRange != nil {
+		start, end := AdjustRange(uRange, objectSize)
+		if start != cp.Start || end != cp.End {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// load loads the checkpoint from the local file
+func (cp *downloadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// dump dumps the checkpoint data to the file
+func (cp *downloadCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Calculate MD5
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Dump
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts gets unfinished parts
+func (cp downloadCheckpoint) todoParts() []downloadPart {
+	dps := []downloadPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// getCompletedBytes gets completed size
+func (cp downloadCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for i, part := range cp.Parts {
+		if cp.PartStat[i] {
+			completedBytes += (part.End - part.Start + 1)
+		}
+	}
+	return completedBytes
+}
+
+// prepare initiates download tasks
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *UnpackedRange) error {
+	// CP
+	cp.Magic = downloadCpMagic
+	cp.FilePath = filePath
+	cp.Object = objectKey
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	if bucket.GetConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.HasStart && !uRange.HasEnd) {
+			cp.enableCRC = true
+			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 64)
+		}
+	}
+
+	// Parts
+	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+
+	return nil
+}
+
+func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
+	err := os.Rename(downFilepath, cp.FilePath)
+	if err != nil {
+		return err
+	}
+	return os.Remove(cpFilePath)
+}
+
+// downloadFileWithCp downloads files with checkpoint.
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *UnpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := GetProgressListener(options) + + // Load checkpoint data. + dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := DeleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) + if err != nil { + return err + } + + // Load error or data invalid. Re-initialize the download. + valid, err := dcp.isValid(meta, uRange) + if err != nil || !valid { + if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Create the file if not exists. Otherwise the parts download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Unfinished parts + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the download workers routine + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Concurrently downloads parts + go downloadScheduler(jobs, parts) + + // Wait for the parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + downBytes := (part.End - part.Start + 1) + completedBytes += downBytes + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = CheckDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 000000000000..a877211fad75 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,94 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. 
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
+}
+
+// Error implements interface error
+func (e ServiceError) Error() string {
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
+}
+
+// Error implements interface error
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// CheckRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
+func CheckRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} + +// CRCCheckError is returned when crc check is inconsistent between client and server +type CRCCheckError struct { + clientCRC uint64 // Calculated CRC64 in client + serverCRC uint64 // Calculated CRC64 in server + operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc + requestID string // The request id of this operation +} + +// Error implements interface error +func (e CRCCheckError) Error() string { + return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s", + e.operation, e.clientCRC, e.serverCRC, e.requestID) +} + +func CheckDownloadCRC(clientCRC, serverCRC uint64) error { + if clientCRC == serverCRC { + return nil + } + return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""} +} + +func CheckCRC(resp *Response, operation string) error { + if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC { + return nil + } + return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)} +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go new file mode 100644 index 000000000000..943dc8fd0d41 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go @@ -0,0 +1,28 @@ +// +build !go1.7 + +// "golang.org/x/time/rate" is depended on golang context package go1.7 onward +// this file is only for build,not supports limit upload speed +package oss + +import ( + "fmt" + "io" +) + +const ( + perTokenBandwidthSize int = 1024 +) + +type OssLimiter struct { +} + +type LimitSpeedReader struct { + io.ReadCloser + reader io.Reader + ossLimiter *OssLimiter +} + +func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) { + err = fmt.Errorf("rate.Limiter is not supported below version go1.7") + return nil, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go new file mode 100644 index 000000000000..f6baf29877bb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go @@ -0,0 +1,90 @@ +// +build go1.7 + +package oss + +import ( + "fmt" + "io" + "math" + "time" + + "golang.org/x/time/rate" +) + +const ( + perTokenBandwidthSize int = 1024 +) + +// OssLimiter wrapper rate.Limiter +type OssLimiter struct { + limiter *rate.Limiter +} + +// GetOssLimiter create OssLimiter +// uploadSpeed KB/s +func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) { + limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed) + + // first consume the initial full token,the limiter will behave more accurately + limiter.AllowN(time.Now(), uploadSpeed) + + return &OssLimiter{ + limiter: limiter, + }, nil +} + +// LimitSpeedReader for limit bandwidth upload +type LimitSpeedReader struct { + io.ReadCloser + reader io.Reader + ossLimiter *OssLimiter +} + +// Read +func (r *LimitSpeedReader) Read(p []byte) (n int, err error) { + n = 0 + err = nil + start := 0 + burst := r.ossLimiter.limiter.Burst() + var end int + var tmpN int + var tc int + for start < len(p) { + if start+burst*perTokenBandwidthSize < len(p) { + end = start + burst*perTokenBandwidthSize + } else { + end = len(p) + } + + tmpN, err = 
r.reader.Read(p[start:end]) + if tmpN > 0 { + n += tmpN + start = n + } + + if err != nil { + return + } + + tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize))) + now := time.Now() + re := r.ossLimiter.limiter.ReserveN(now, tc) + if !re.OK() { + err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d", + start, end, burst, perTokenBandwidthSize) + return + } + timeDelay := re.Delay() + time.Sleep(timeDelay) + } + return +} + +// Close ... +func (r *LimitSpeedReader) Close() error { + rc, ok := r.reader.(io.ReadCloser) + if ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go new file mode 100644 index 000000000000..bf5ba070bb2f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go @@ -0,0 +1,257 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "strconv" + "time" +) + +// +// CreateLiveChannel create a live-channel +// +// channelName the name of the channel +// config configuration of the channel +// +// CreateLiveChannelResult the result of create live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) { + var out CreateLiveChannelResult + + bs, err := xml.Marshal(config) + if err != nil { + return out, err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["live"] = nil + resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// PutLiveChannelStatus Set the status of the live-channel: enabled/disabled +// +// channelName the name of the channel +// status enabled/disabled +// +// error nil if success, otherwise error +// +func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error { + params := map[string]interface{}{} + params["live"] = nil + params["status"] = status + + resp, err := bucket.do("PUT", channelName, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// PostVodPlaylist create an playlist based on the specified playlist name, startTime and endTime +// +// channelName the name of the channel +// playlistName the name of the playlist, must end with ".m3u8" +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// error nil if success, otherwise error +// +func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error { + params := map[string]interface{}{} + params["vod"] = nil + params["startTime"] = strconv.FormatInt(startTime.Unix(), 10) + params["endTime"] = strconv.FormatInt(endTime.Unix(), 10) + + key := fmt.Sprintf("%s/%s", channelName, playlistName) + resp, err := bucket.do("POST", key, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetVodPlaylist get the playlist based on the specified channelName, startTime and endTime +// +// channelName the name of the channel +// startTime the start time of the playlist +// endTime the endtime of the playlist +// +// io.ReadCloser reader 
instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error nil if success, otherwise error +// +func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) { + params := map[string]interface{}{} + params["vod"] = nil + params["startTime"] = strconv.FormatInt(startTime.Unix(), 10) + params["endTime"] = strconv.FormatInt(endTime.Unix(), 10) + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +// +// GetLiveChannelStat Get the state of the live-channel +// +// channelName the name of the channel +// +// LiveChannelStat the state of the live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) { + var out LiveChannelStat + params := map[string]interface{}{} + params["live"] = nil + params["comp"] = "stat" + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// GetLiveChannelInfo Get the configuration info of the live-channel +// +// channelName the name of the channel +// +// LiveChannelConfiguration the configuration info of the live-channel +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) { + var out LiveChannelConfiguration + params := map[string]interface{}{} + params["live"] = nil + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// GetLiveChannelHistory Get push records of live-channel +// +// channelName the name of the channel +// +// LiveChannelHistory push records +// error nil if success, otherwise error +// +func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) { + var out LiveChannelHistory + params := map[string]interface{}{} + params["live"] = nil + params["comp"] = "history" + + resp, err := bucket.do("GET", channelName, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// ListLiveChannel list the live-channels +// +// options Prefix: filter by the name start with the value of "Prefix" +// MaxKeys: the maximum count returned +// Marker: cursor from which starting list +// +// ListLiveChannelResult live-channel list +// error nil if success, otherwise error +// +func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) { + var out ListLiveChannelResult + + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + params["live"] = nil + + resp, err := bucket.do("GET", "", params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// DeleteLiveChannel Delete the live-channel. When a client trying to stream the live-channel, the operation will fail. it will only delete the live-channel itself and the object generated by the live-channel will not be deleted. 
+// +// channelName the name of the channel +// +// error nil if success, otherwise error +// +func (bucket Bucket) DeleteLiveChannel(channelName string) error { + params := map[string]interface{}{} + params["live"] = nil + + if channelName == "" { + return fmt.Errorf("invalid argument: channel name is empty") + } + + resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// SignRtmpURL Generate a RTMP push-stream signature URL for the trusted user to push the RTMP stream to the live-channel. +// +// channelName the name of the channel +// playlistName the name of the playlist, must end with ".m3u8" +// expires expiration (in seconds) +// +// string singed rtmp push stream url +// error nil if success, otherwise error +// +func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) { + if expires <= 0 { + return "", fmt.Errorf("invalid argument: %d, expires must greater than 0", expires) + } + expiration := time.Now().Unix() + expires + + return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go new file mode 100644 index 000000000000..64f4dcc638be --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go @@ -0,0 +1,572 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": "application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": 
"application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + 
".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", + ".323": "text/h323", + ".aab": "application/x-authoware-bin", + ".aam": "application/x-authoware-map", + ".aas": "application/x-authoware-seg", + ".acx": "application/internet-property-stream", + ".als": "audio/X-Alpha5", + ".amc": "application/x-mpeg", + ".ani": "application/octet-stream", + ".asd": "application/astound", + ".asf": "video/x-ms-asf", + ".asn": "application/astound", + ".asp": "application/x-asap", + ".asr": "video/x-ms-asf", + ".asx": "video/x-ms-asf", + ".avb": "application/octet-stream", + ".awb": "audio/amr-wb", + ".axs": "application/olescript", + ".bas": "text/plain", + ".bin ": "application/octet-stream", + ".bld": "application/bld", + ".bld2": "application/bld2", + ".bpk": "application/octet-stream", + ".c": "text/plain", + ".cal": "image/x-cals", + ".cat": "application/vnd.ms-pkiseccat", + ".ccn": "application/x-cnc", + 
".cco": "application/x-cocoa", + ".cer": "application/x-x509-ca-cert", + ".cgi": "magnus-internal/cgi", + ".chat": "application/x-chat", + ".clp": "application/x-msclip", + ".cmx": "image/x-cmx", + ".co": "application/x-cult3d-object", + ".cod": "image/cis-cod", + ".conf": "text/plain", + ".cpp": "text/plain", + ".crd": "application/x-mscardfile", + ".crl": "application/pkix-crl", + ".crt": "application/x-x509-ca-cert", + ".csm": "chemical/x-csml", + ".csml": "chemical/x-csml", + ".cur": "application/octet-stream", + ".dcm": "x-lml/x-evm", + ".dcx": "image/x-dcx", + ".der": "application/x-x509-ca-cert", + ".dhtml": "text/html", + ".dot": "application/msword", + ".dwf": "drawing/x-dwf", + ".dwg": "application/x-autocad", + ".dxf": "application/x-autocad", + ".ebk": "application/x-expandedbook", + ".emb": "chemical/x-embl-dl-nucleotide", + ".embl": "chemical/x-embl-dl-nucleotide", + ".epub": "application/epub+zip", + ".eri": "image/x-eri", + ".es": "audio/echospeech", + ".esl": "audio/echospeech", + ".etc": "application/x-earthtime", + ".evm": "x-lml/x-evm", + ".evy": "application/envoy", + ".fh4": "image/x-freehand", + ".fh5": "image/x-freehand", + ".fhc": "image/x-freehand", + ".fif": "application/fractals", + ".flr": "x-world/x-vrml", + ".fm": "application/x-maker", + ".fpx": "image/x-fpx", + ".fvi": "video/isivideo", + ".gau": "chemical/x-gaussian-input", + ".gca": "application/x-gca-compressed", + ".gdb": "x-lml/x-gdb", + ".gps": "application/x-gps", + ".h": "text/plain", + ".hdm": "text/x-hdml", + ".hdml": "text/x-hdml", + ".hlp": "application/winhlp", + ".hta": "application/hta", + ".htc": "text/x-component", + ".hts": "text/html", + ".htt": "text/webviewhtml", + ".ifm": "image/gif", + ".ifs": "image/ifs", + ".iii": "application/x-iphone", + ".imy": "audio/melody", + ".ins": "application/x-internet-signup", + ".ips": "application/x-ipscript", + ".ipx": "application/x-ipix", + ".isp": "application/x-internet-signup", + ".it": "audio/x-mod", + ".itz": "audio/x-mod", + ".ivr": "i-world/i-vrml", + ".j2k": "image/j2k", + ".jam": "application/x-jam", + ".java": "text/plain", + ".jfif": "image/pipeg", + ".jpz": "image/jpeg", + ".jwc": "application/jwc", + ".kjx": "application/x-kjx", + ".lak": "x-lml/x-lak", + ".lcc": "application/fastman", + ".lcl": "application/x-digitalloca", + ".lcr": "application/x-digitalloca", + ".lgh": "application/lgh", + ".lml": "x-lml/x-lml", + ".lmlpack": "x-lml/x-lmlpack", + ".log": "text/plain", + ".lsf": "video/x-la-asf", + ".lsx": "video/x-la-asf", + ".m13": "application/x-msmediaview", + ".m14": "application/x-msmediaview", + ".m15": "audio/x-mod", + ".m3url": "audio/x-mpegurl", + ".m4b": "audio/mp4a-latm", + ".ma1": "audio/ma1", + ".ma2": "audio/ma2", + ".ma3": "audio/ma3", + ".ma5": "audio/ma5", + ".map": "magnus-internal/imagemap", + ".mbd": "application/mbedlet", + ".mct": "application/x-mascot", + ".mdb": "application/x-msaccess", + ".mdz": "audio/x-mod", + ".mel": "text/x-vmel", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mi": "application/x-mif", + ".mil": "image/x-cals", + ".mio": "audio/x-mio", + ".mmf": "application/x-skt-lbs", + ".mng": "video/x-mng", + ".mny": "application/x-msmoney", + ".moc": "application/x-mocha", + ".mocha": "application/x-mocha", + ".mod": "audio/x-mod", + ".mof": "application/x-yumekara", + ".mol": "chemical/x-mdl-molfile", + ".mop": "chemical/x-mopac-input", + ".mpa": "video/mpeg", + ".mpc": "application/vnd.mpohun.certificate", + ".mpg4": "video/mp4", + ".mpn": "application/vnd.mophun.application", + 
".mpp": "application/vnd.ms-project", + ".mps": "application/x-mapserver", + ".mpv2": "video/mpeg", + ".mrl": "text/x-mrml", + ".mrm": "application/x-mrm", + ".msg": "application/vnd.ms-outlook", + ".mts": "application/metastream", + ".mtx": "application/metastream", + ".mtz": "application/metastream", + ".mvb": "application/x-msmediaview", + ".mzv": "application/metastream", + ".nar": "application/zip", + ".nbmp": "image/nbmp", + ".ndb": "x-lml/x-ndb", + ".ndwn": "application/ndwn", + ".nif": "application/x-nif", + ".nmz": "application/x-scream", + ".nokia-op-logo": "image/vnd.nok-oplogo-color", + ".npx": "application/x-netfpx", + ".nsnd": "audio/nsnd", + ".nva": "application/x-neva1", + ".nws": "message/rfc822", + ".oom": "application/x-AtlasMate-Plugin", + ".p10": "application/pkcs10", + ".p12": "application/x-pkcs12", + ".p7b": "application/x-pkcs7-certificates", + ".p7c": "application/x-pkcs7-mime", + ".p7m": "application/x-pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/x-pkcs7-signature", + ".pac": "audio/x-pac", + ".pae": "audio/x-epac", + ".pan": "application/x-pan", + ".pcx": "image/x-pcx", + ".pda": "image/x-pda", + ".pfr": "application/font-tdpfr", + ".pfx": "application/x-pkcs12", + ".pko": "application/ynd.ms-pkipko", + ".pm": "application/x-perl", + ".pma": "application/x-perfmon", + ".pmc": "application/x-perfmon", + ".pmd": "application/x-pmd", + ".pml": "application/x-perfmon", + ".pmr": "application/x-perfmon", + ".pmw": "application/x-perfmon", + ".pnz": "image/png", + ".pot,": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pqf": "application/x-cprplayer", + ".pqi": "application/cprplayer", + ".prc": "application/x-prc", + ".prf": "application/pics-rules", + ".prop": "text/plain", + ".proxy": "application/x-ns-proxy-autoconfig", + ".ptlk": "application/listenup", + ".pub": "application/x-mspublisher", + ".pvx": "video/x-pv-pvx", + ".qcp": "audio/vnd.qcelp", + ".r3t": "text/vnd.rn-realtext3d", + ".rar": "application/octet-stream", + ".rc": "text/plain", + ".rf": "image/vnd.rn-realflash", + ".rlf": "application/x-richlink", + ".rmf": "audio/x-rmf", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmvb": "audio/x-pn-realaudio", + ".rnx": "application/vnd.rn-realplayer", + ".rp": "image/vnd.rn-realpix", + ".rt": "text/vnd.rn-realtext", + ".rte": "x-lml/x-gps", + ".rtg": "application/metastream", + ".rv": "video/vnd.rn-realvideo", + ".rwc": "application/x-rogerwilco", + ".s3m": "audio/x-mod", + ".s3z": "audio/x-mod", + ".sca": "application/x-supercard", + ".scd": "application/x-msschedule", + ".sct": "text/scriptlet", + ".sdf": "application/e-score", + ".sea": "application/x-stuffit", + ".setpay": "application/set-payment-initiation", + ".setreg": "application/set-registration-initiation", + ".shtml": "text/html", + ".shtm": "text/html", + ".shw": "application/presentations", + ".si6": "image/si6", + ".si7": "image/vnd.stiwap.sis", + ".si9": "image/vnd.lgtwap.sis", + ".slc": "application/x-salsa", + ".smd": "audio/x-smd", + ".smp": "application/studiom", + ".smz": "audio/x-smd", + ".spc": "application/x-pkcs7-certificates", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".sdp": "application/sdp", + ".spt": "application/x-spt", + ".sst": "application/vnd.ms-pkicertstore", + ".stk": "application/hyperstudio", + ".stl": "application/vnd.ms-pkistl", + ".stm": "text/html", + ".svf": "image/vnd", + ".svh": "image/svh", + ".svr": "x-world/x-svr", + ".swfl": 
"application/x-shockwave-flash", + ".tad": "application/octet-stream", + ".talk": "text/x-speech", + ".taz": "application/x-tar", + ".tbp": "application/x-timbuktu", + ".tbt": "application/x-timbuktu", + ".tgz": "application/x-compressed", + ".thm": "application/vnd.eri.thm", + ".tki": "application/x-tkined", + ".tkined": "application/x-tkined", + ".toc": "application/toc", + ".toy": "image/toy", + ".trk": "x-lml/x-gps", + ".trm": "application/x-msterminal", + ".tsi": "audio/tsplayer", + ".tsp": "application/dsptype", + ".ttf": "application/octet-stream", + ".ttz": "application/t-time", + ".uls": "text/iuls", + ".ult": "audio/x-mod", + ".uu": "application/x-uuencode", + ".uue": "application/x-uuencode", + ".vcf": "text/x-vcard", + ".vdo": "video/vdo", + ".vib": "audio/vib", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".vmi": "application/x-dreamcast-vms-info", + ".vms": "application/x-dreamcast-vms", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq", + ".vre": "x-world/x-vream", + ".vrt": "x-world/x-vrt", + ".vrw": "x-world/x-vream", + ".vts": "workbook/formulaone", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".web": "application/vnd.xara", + ".wi": "image/wavelet", + ".wis": "application/x-InstallShield", + ".wks": "application/vnd.ms-works", + ".wmd": "application/x-ms-wmd", + ".wmf": "application/x-msmetafile", + ".wmlscript": "text/vnd.wap.wmlscript", + ".wmz": "application/x-ms-wmz", + ".wpng": "image/x-up-wpng", + ".wps": "application/vnd.ms-works", + ".wpt": "x-lml/x-gps", + ".wri": "application/x-mswrite", + ".wrz": "x-world/x-vrml", + ".ws": "text/vnd.wap.wmlscript", + ".wsc": "application/vnd.wap.wmlscriptc", + ".wv": "video/wavelet", + ".wxl": "application/x-wxl", + ".x-gzip": "application/x-gzip", + ".xaf": "x-world/x-vrml", + ".xar": "application/vnd.xara", + ".xdm": "application/x-xdma", + ".xdma": "application/x-xdma", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xhtm": "application/xhtml+xml", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xll": "application/x-excel", + ".xlm": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xm": "audio/x-mod", + ".xmz": "audio/x-mod", + ".xof": "x-world/x-vrml", + ".xpi": "application/x-xpinstall", + ".xsit": "text/xml", + ".yz1": "application/x-yz1", + ".z": "application/x-compress", + ".zac": "application/x-zaurus-zac", + ".json": "application/json", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// gets the file's MIME type for HTTP header Content-Type
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 000000000000..b0b4a5027158
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,69 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response defines HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+// Close closes the http response body
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
+// PutObjectRequest is the request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 000000000000..56ed8cadfd01
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,474 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// CopyFile performs a multipart copy of an object
+//
+// srcBucketName source bucket name
+// srcObjectKey source object name
+// destObjectKey target object name in the form of bucketname.objectkey
+// partSize the part size in bytes.
+// options object's constraints. Check out function InitiateMultipartUpload.
+//
+// error it's nil if the operation succeeds, otherwise it's an error object.
+// +func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error { + destBucketName := bucket.BucketName + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (1024KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + var strVersionId string + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + strVersionId = versionId.(string) + } + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey, strVersionId) + if cpFilePath != "" { + return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines) + } + } + + return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey, + partSize, options, routines) +} + +func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject, versionId string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + cpFileName := getCpFileName(src, dest, versionId) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- Concurrently copy without checkpoint --------- + +// copyWorkerArg defines the copy worker arguments +type copyWorkerArg struct { + bucket *Bucket + imur InitiateMultipartUploadResult + srcBucketName string + srcObjectKey string + options []Option + hook copyPartHook +} + +// copyPartHook is the hook for testing purpose +type copyPartHook func(part copyPart) error + +var copyPartHooker copyPartHook = defaultCopyPartHook + +func defaultCopyPartHook(part copyPart) error { + return nil +} + +// copyWorker copies worker +func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(chunk); err != nil { + failed <- err + break + } + chunkSize := chunk.End - chunk.Start + 1 + part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey, + chunk.Start, chunkSize, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// copyScheduler +func copyScheduler(jobs chan copyPart, parts []copyPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// copyPart structure +type copyPart struct { + Number int // Part number (from 1 to 10,000) + Start int64 // The start index in the source file. 
+ End int64 // The end index in the source file +} + +// getCopyParts calculates copy parts +func getCopyParts(objectSize, partSize int64) []copyPart { + parts := []copyPart{} + part := copyPart{} + i := 0 + for offset := int64(0); offset < objectSize; offset += partSize { + part.Number = i + 1 + part.Start = offset + part.End = GetPartEnd(offset, objectSize, partSize) + parts = append(parts, part) + i++ + } + return parts +} + +// getSrcObjectBytes gets the source file size +func getSrcObjectBytes(parts []copyPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// copyFile is a concurrently copy without checkpoint +func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := GetProgressListener(options) + + // choice valid options + headerOptions := ChoiceHeadObjectOption(options) + partOptions := ChoiceTransferPartOption(options) + completeOptions := ChoiceCompletePartOption(options) + abortOptions := ChoiceAbortPartOption(options) + + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...) + if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return err + } + + // Get copy parts + parts := getCopyParts(objectSize, partSize) + // Initialize the multipart upload + imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getSrcObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start to copy workers + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // Start the scheduler + go copyScheduler(jobs, parts) + + // Wait for the parts finished. + completed := 0 + ups := make([]UploadPart, len(parts)) + for completed < len(parts) { + select { + case part := <-results: + completed++ + ups[part.PartNumber-1] = part + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + descBucket.AbortMultipartUpload(imur, abortOptions...) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + // Complete the multipart upload + _, err = descBucket.CompleteMultipartUpload(imur, ups, completeOptions...) + if err != nil { + bucket.AbortMultipartUpload(imur, abortOptions...) 
+ return err + } + return nil +} + +// ----- Concurrently copy with checkpoint ----- + +const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A" + +type copyCheckpoint struct { + Magic string // Magic + MD5 string // CP content MD5 + SrcBucketName string // Source bucket + SrcObjectKey string // Source object + DestBucketName string // Target bucket + DestObjectKey string // Target object + CopyID string // Copy ID + ObjStat objectStat // Object stat + Parts []copyPart // Copy parts + CopyParts []UploadPart // The uploaded parts + PartStat []bool // The part status +} + +// isValid checks if the data is valid which means CP is valid and object is not updated. +func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) { + // Compare CP's magic number and the MD5. + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != downloadCpMagic || b64 != cp.MD5 { + return false, nil + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return false, err + } + + // Compare the object size and last modified time and etag. + if cp.ObjStat.Size != objectSize || + cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) || + cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) { + return false, nil + } + + return true, nil +} + +// load loads from the checkpoint file +func (cp *copyCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// update updates the parts status +func (cp *copyCheckpoint) update(part UploadPart) { + cp.CopyParts[part.PartNumber-1] = part + cp.PartStat[part.PartNumber-1] = true +} + +// dump dumps the CP to the file +func (cp *copyCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialization + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// todoParts returns unfinished parts +func (cp copyCheckpoint) todoParts() []copyPart { + dps := []copyPart{} + for i, ps := range cp.PartStat { + if !ps { + dps = append(dps, cp.Parts[i]) + } + } + return dps +} + +// getCompletedBytes returns finished bytes count +func (cp copyCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for i, part := range cp.Parts { + if cp.PartStat[i] { + completedBytes += (part.End - part.Start + 1) + } + } + return completedBytes +} + +// prepare initializes the multipart upload +func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string, + partSize int64, options []Option) error { + // CP + cp.Magic = copyCpMagic + cp.SrcBucketName = srcBucket.BucketName + cp.SrcObjectKey = srcObjectKey + cp.DestBucketName = destBucket.BucketName + cp.DestObjectKey = destObjectKey + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 64) + if err != nil { + return err + } + + cp.ObjStat.Size = objectSize + cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified) + cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag) + + // Parts + cp.Parts = getCopyParts(objectSize, partSize) + cp.PartStat = make([]bool, len(cp.Parts)) + for i := range cp.PartStat { + cp.PartStat[i] = 
false + } + cp.CopyParts = make([]UploadPart, len(cp.Parts)) + + // Init copy + imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + cp.CopyID = imur.UploadID + + return nil +} + +func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { + imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName, + Key: cp.DestObjectKey, UploadID: cp.CopyID} + _, err := bucket.CompleteMultipartUpload(imur, parts, options...) + if err != nil { + return err + } + os.Remove(cpFilePath) + return err +} + +// copyFileWithCp is concurrently copy with checkpoint +func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, cpFilePath string, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := GetProgressListener(options) + + // Load CP data + ccp := copyCheckpoint{} + err = ccp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // choice valid options + headerOptions := ChoiceHeadObjectOption(options) + partOptions := ChoiceTransferPartOption(options) + completeOptions := ChoiceCompletePartOption(options) + + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, headerOptions...) + if err != nil { + return err + } + + // Load error or the CP data is invalid---reinitialize + valid, err := ccp.isValid(meta) + if err != nil || !valid { + if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Unfinished parts + parts := ccp.todoParts() + imur := InitiateMultipartUploadResult{ + Bucket: destBucketName, + Key: destObjectKey, + UploadID: ccp.CopyID} + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := ccp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the worker coroutines + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, partOptions, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // Start the scheduler + go copyScheduler(jobs, parts) + + // Wait for the parts completed. 
+ completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + ccp.update(part) + ccp.dump(cpFilePath) + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + + return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, completeOptions) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go new file mode 100644 index 000000000000..9e71419712d5 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go @@ -0,0 +1,305 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" + "net/url" + "os" + "sort" + "strconv" +) + +// InitiateMultipartUpload initializes multipart upload +// +// objectKey object name +// options the object constricts for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, Meta, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html +// +// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for calls later on such as UploadPartFromFile,UploadPartCopy. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) { + var imur InitiateMultipartUploadResult + opts := AddContentType(options, objectKey) + params, _ := GetRawParams(options) + paramKeys := []string{"sequential", "withHashContext", "x-oss-enable-md5", "x-oss-enable-sha1", "x-oss-enable-sha256"} + ConvertEmptyValueToNil(params, paramKeys) + params["uploads"] = nil + + resp, err := bucket.do("POST", objectKey, params, opts, nil, nil) + if err != nil { + return imur, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &imur) + return imur, err +} + +// UploadPart uploads parts +// +// After initializing a Multipart Upload, the upload Id and object key could be used for uploading the parts. +// Each part has its part number (ranges from 1 to 10,000). And for each upload Id, the part number identifies the position of the part in the whole file. +// And thus with the same part number and upload Id, another part upload will overwrite the data. +// Except the last one, minimal part size is 100KB. There's no limit on the last part size. +// +// imur the returned value of InitiateMultipartUpload. +// reader io.Reader the reader for the part's data. +// size the part size. +// partNumber the part number (ranges from 1 to 10,000). Invalid part number will lead to InvalidArgument error. +// +// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
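+// A minimal sketch of the flow (the object key, data and part size are
+// placeholders; parts other than the last must be at least 100KB):
+//
+//	imur, err := bucket.InitiateMultipartUpload("my-object")
+//	part, err := bucket.UploadPart(imur, bytes.NewReader(data), int64(len(data)), 1)
+//	_, err = bucket.CompleteMultipartUpload(imur, []UploadPart{part})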
+// +func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader, + partSize int64, partNumber int, options ...Option) (UploadPart, error) { + request := &UploadPartRequest{ + InitResult: &imur, + Reader: reader, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// UploadPartFromFile uploads part from the file. +// +// imur the return value of a successful InitiateMultipartUpload. +// filePath the local file path to upload. +// startPosition the start position in the local file. +// partSize the part size. +// partNumber the part number (from 1 to 10,000) +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var part = UploadPart{} + fd, err := os.Open(filePath) + if err != nil { + return part, err + } + defer fd.Close() + fd.Seek(startPosition, os.SEEK_SET) + + request := &UploadPartRequest{ + InitResult: &imur, + Reader: fd, + PartSize: partSize, + PartNumber: partNumber, + } + + result, err := bucket.DoUploadPart(request, options) + + return result.Part, err +} + +// DoUploadPart does the actual part upload. +// +// request part upload request +// +// UploadPartResult the result of uploading part. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) { + listener := GetProgressListener(options) + options = append(options, ContentLength(request.PartSize)) + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(request.PartNumber) + params["uploadId"] = request.InitResult.UploadID + resp, err := bucket.do("PUT", request.InitResult.Key, params, options, + &io.LimitedReader{R: request.Reader, N: request.PartSize}, listener) + if err != nil { + return &UploadPartResult{}, err + } + defer resp.Body.Close() + + part := UploadPart{ + ETag: resp.Headers.Get(HTTPHeaderEtag), + PartNumber: request.PartNumber, + } + + if bucket.GetConfig().IsEnableCRC { + err = CheckCRC(resp, "DoUploadPart") + if err != nil { + return &UploadPartResult{part}, err + } + } + + return &UploadPartResult{part}, nil +} + +// UploadPartCopy uploads part copy +// +// imur the return value of InitiateMultipartUpload +// copySrc source Object name +// startPosition the part's start index in the source file +// partSize the part size +// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns InvalidArgument error. +// options the constraints of source object for the copy. The copy happens only when these contraints are met. Otherwise it returns error. +// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail +// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html +// +// UploadPart the return value consists of PartNumber and ETag. +// error it's nil if the operation succeeds, otherwise it's an error object. 
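+// A minimal sketch (bucket names, object key and partSize are placeholders):
+//
+//	part, err := destBucket.UploadPartCopy(imur, "src-bucket", "src-object", 0, partSize, 1)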
+// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + var opts []Option + + //first find version id + versionIdKey := "versionId" + versionId, _ := FindOption(options, versionIdKey, nil) + if versionId == nil { + opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)), + CopySourceRange(startPosition, partSize)} + } else { + opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)), + CopySourceRange(startPosition, partSize)} + options = DeleteOption(options, versionIdKey) + } + + opts = append(opts, options...) + + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// CompleteMultipartUpload completes the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy. +// +// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(UploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AbortMultipartUpload aborts the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return CheckRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// ListUploadedParts lists the uploaded parts. +// +// imur the return value of InitiateMultipartUpload. +// +// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
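+// A minimal sketch (imur comes from an earlier InitiateMultipartUpload call;
+// the result describes the parts uploaded so far for that upload ID):
+//
+//	lsRes, err := bucket.ListUploadedParts(imur)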
+// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + options = append(options, EncodingType("url")) + + params := map[string]interface{}{} + params, err := GetRawParams(options) + if err != nil { + return out, err + } + + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListUploadedPartsResult(&out) + return out, err +} + +// ListMultipartUploads lists all ongoing multipart upload tasks +// +// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order; +// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys. +// +// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := GetRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 000000000000..ccae9f429994 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,689 @@ +package oss + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL parameter + optionHTTP optionType = "HTTPHeader" // HTTP header + optionArg optionType = "FuncArgument" // Function argument +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" + responseHeader = "x-response-header" + redundancyType = "redundancy-type" + objectHashFunc = "object-hash-func" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option HTTP option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { 
+ return setHeader(HTTPHeaderContentDisposition, value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) Option { + return setHeader(HTTPHeaderContentLanguage, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) +} + +// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048 +func NormalizedRange(nr string) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr))) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) Option { + return setHeader(HTTPHeaderAcceptEncoding, value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) Option { + return setHeader(HTTPHeaderIfMatch, value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) Option { + return setHeader(HTTPHeaderIfNoneMatch, value) +} + +// CopySource is an option to set X-Oss-Copy-Source header +func CopySource(sourceBucket, sourceObject string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject) +} + +// CopySourceVersion is an option to set X-Oss-Copy-Source header,include versionId +func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId) +} + +// CopySourceRange is an option to set X-Oss-Copy-Source header +func CopySourceRange(startPosition, partSize int64) Option { + val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + + strconv.FormatInt((startPosition+partSize-1), 10) + return setHeader(HTTPHeaderOssCopySourceRange, val) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfMatch, value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) +} + +// CopySourceIfUnmodifiedSince is an option to set 
X-Oss-Copy-Source-If-Unmodified-Since header +func CopySourceIfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// MetadataDirective is an option to set X-Oss-Metadata-Directive header +func MetadataDirective(directive MetadataDirectiveType) Option { + return setHeader(HTTPHeaderOssMetadataDirective, string(directive)) +} + +// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header +func ServerSideEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryption, value) +} + +// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header +func ServerSideEncryptionKeyID(value string) Option { + return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value) +} + +// ServerSideDataEncryption is an option to set X-Oss-Server-Side-Data-Encryption header +func ServerSideDataEncryption(value string) Option { + return setHeader(HTTPHeaderOssServerSideDataEncryption, value) +} + +// SSECAlgorithm is an option to set X-Oss-Server-Side-Encryption-Customer-Algorithm header +func SSECAlgorithm(value string) Option { + return setHeader(HTTPHeaderSSECAlgorithm, value) +} + +// SSECKey is an option to set X-Oss-Server-Side-Encryption-Customer-Key header +func SSECKey(value string) Option { + return setHeader(HTTPHeaderSSECKey, value) +} + +// SSECKeyMd5 is an option to set X-Oss-Server-Side-Encryption-Customer-Key-Md5 header +func SSECKeyMd5(value string) Option { + return setHeader(HTTPHeaderSSECKeyMd5, value) +} + +// ObjectACL is an option to set X-Oss-Object-Acl header +func ObjectACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssObjectACL, string(acl)) +} + +// symlinkTarget is an option to set X-Oss-Symlink-Target +func symlinkTarget(targetObjectKey string) Option { + return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey) +} + +// Origin is an option to set Origin header +func Origin(value string) Option { + return setHeader(HTTPHeaderOrigin, value) +} + +// ObjectStorageClass is an option to set the storage class of object +func ObjectStorageClass(storageClass StorageClassType) Option { + return setHeader(HTTPHeaderOssStorageClass, string(storageClass)) +} + +// Callback is an option to set callback values +func Callback(callback string) Option { + return setHeader(HTTPHeaderOssCallback, callback) +} + +// CallbackVar is an option to set callback user defined values +func CallbackVar(callbackVar string) Option { + return setHeader(HTTPHeaderOssCallbackVar, callbackVar) +} + +// RequestPayer is an option to set payer who pay for the request +func RequestPayer(payerType PayerType) Option { + return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType))) +} + +// RequestPayerParam is an option to set payer who pay for the request +func RequestPayerParam(payerType PayerType) Option { + return addParam(strings.ToLower(HTTPHeaderOssRequester), strings.ToLower(string(payerType))) +} + +// SetTagging is an option to set object tagging +func SetTagging(tagging Tagging) Option { + if len(tagging.Tags) == 0 { + return nil + } + + taggingValue := "" + for index, tag := range tagging.Tags { + if index != 0 { + taggingValue += "&" + } + taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value) + } + return setHeader(HTTPHeaderOssTagging, taggingValue) +} + +// TaggingDirective is an option to set X-Oss-Metadata-Directive header +func TaggingDirective(directive TaggingDirectiveType) Option { + return 
setHeader(HTTPHeaderOssTaggingDirective, string(directive)) +} + +// ACReqMethod is an option to set Access-Control-Request-Method header +func ACReqMethod(value string) Option { + return setHeader(HTTPHeaderACReqMethod, value) +} + +// ACReqHeaders is an option to set Access-Control-Request-Headers header +func ACReqHeaders(value string) Option { + return setHeader(HTTPHeaderACReqHeaders, value) +} + +// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit +func TrafficLimitHeader(value int64) Option { + return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10)) +} + +// UserAgentHeader is an option to set HTTPHeaderUserAgent +func UserAgentHeader(ua string) Option { + return setHeader(HTTPHeaderUserAgent, ua) +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func ForbidOverWrite(forbidWrite bool) Option { + if forbidWrite { + return setHeader(HTTPHeaderOssForbidOverWrite, "true") + } else { + return setHeader(HTTPHeaderOssForbidOverWrite, "false") + } +} + +// RangeBehavior is an option to set Range value, such as "standard" +func RangeBehavior(value string) Option { + return setHeader(HTTPHeaderOssRangeBehavior, value) +} + +func PartHashCtxHeader(value string) Option { + return setHeader(HTTPHeaderOssHashCtx, value) +} + +func PartMd5CtxHeader(value string) Option { + return setHeader(HTTPHeaderOssMd5Ctx, value) +} + +func PartHashCtxParam(value string) Option { + return addParam("x-oss-hash-ctx", value) +} + +func PartMd5CtxParam(value string) Option { + return addParam("x-oss-md5-ctx", value) +} + +// Delimiter is an option to set delimiler parameter +func Delimiter(value string) Option { + return addParam("delimiter", value) +} + +// Marker is an option to set marker parameter +func Marker(value string) Option { + return addParam("marker", value) +} + +// MaxKeys is an option to set maxkeys parameter +func MaxKeys(value int) Option { + return addParam("max-keys", strconv.Itoa(value)) +} + +// Prefix is an option to set prefix parameter +func Prefix(value string) Option { + return addParam("prefix", value) +} + +// EncodingType is an option to set encoding-type parameter +func EncodingType(value string) Option { + return addParam("encoding-type", value) +} + +// MaxUploads is an option to set max-uploads parameter +func MaxUploads(value int) Option { + return addParam("max-uploads", strconv.Itoa(value)) +} + +// KeyMarker is an option to set key-marker parameter +func KeyMarker(value string) Option { + return addParam("key-marker", value) +} + +// VersionIdMarker is an option to set version-id-marker parameter +func VersionIdMarker(value string) Option { + return addParam("version-id-marker", value) +} + +// VersionId is an option to set versionId parameter +func VersionId(value string) Option { + return addParam("versionId", value) +} + +// TagKey is an option to set tag key parameter +func TagKey(value string) Option { + return addParam("tag-key", value) +} + +// TagValue is an option to set tag value parameter +func TagValue(value string) Option { + return addParam("tag-value", value) +} + +// UploadIDMarker is an option to set upload-id-marker parameter +func UploadIDMarker(value string) Option { + return addParam("upload-id-marker", value) +} + +// MaxParts is an option to set max-parts parameter +func MaxParts(value int) Option { + return addParam("max-parts", strconv.Itoa(value)) +} + +// PartNumberMarker is an option to set part-number-marker parameter +func PartNumberMarker(value int) Option { + return addParam("part-number-marker", 
strconv.Itoa(value)) +} + +// Sequential is an option to set sequential parameter for InitiateMultipartUpload +func Sequential() Option { + return addParam("sequential", "") +} + +// WithHashContext is an option to set withHashContext parameter for InitiateMultipartUpload +func WithHashContext() Option { + return addParam("withHashContext", "") +} + +// EnableMd5 is an option to set x-oss-enable-md5 parameter for InitiateMultipartUpload +func EnableMd5() Option { + return addParam("x-oss-enable-md5", "") +} + +// EnableSha1 is an option to set x-oss-enable-sha1 parameter for InitiateMultipartUpload +func EnableSha1() Option { + return addParam("x-oss-enable-sha1", "") +} + +// EnableSha256 is an option to set x-oss-enable-sha256 parameter for InitiateMultipartUpload +func EnableSha256() Option { + return addParam("x-oss-enable-sha256", "") +} + +// ListType is an option to set List-type parameter for ListObjectsV2 +func ListType(value int) Option { + return addParam("list-type", strconv.Itoa(value)) +} + +// StartAfter is an option to set start-after parameter for ListObjectsV2 +func StartAfter(value string) Option { + return addParam("start-after", value) +} + +// ContinuationToken is an option to set Continuation-token parameter for ListObjectsV2 +func ContinuationToken(value string) Option { + if value == "" { + return addParam("continuation-token", nil) + } + return addParam("continuation-token", value) +} + +// FetchOwner is an option to set Fetch-owner parameter for ListObjectsV2 +func FetchOwner(value bool) Option { + if value { + return addParam("fetch-owner", "true") + } + return addParam("fetch-owner", "false") +} + +// DeleteObjectsQuiet false:DeleteObjects in verbose mode; true:DeleteObjects in quite mode. Default is false. +func DeleteObjectsQuiet(isQuiet bool) Option { + return addArg(deleteObjectsQuiet, isQuiet) +} + +// StorageClass bucket storage class +func StorageClass(value StorageClassType) Option { + return addArg(storageClass, value) +} + +// RedundancyType bucket data redundancy type +func RedundancyType(value DataRedundancyType) Option { + return addArg(redundancyType, value) +} + +// RedundancyType bucket data redundancy type +func ObjectHashFunc(value ObjecthashFuncType) Option { + return addArg(objectHashFunc, value) +} + +// Checkpoint configuration +type cpConfig struct { + IsEnable bool + FilePath string + DirPath string +} + +// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile. +func Checkpoint(isEnable bool, filePath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath}) +} + +// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile. 
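+// A minimal sketch of combining the checkpoint and concurrency options for a
+// resumable transfer ("/tmp/cp" is a placeholder directory):
+//
+//	opts := []Option{Routines(3), CheckpointDir(true, "/tmp/cp")}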
+func CheckpointDir(isEnable bool, dirPath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath}) +} + +// Routines DownloadFile/UploadFile routine count +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC Init AppendObject CRC +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress set progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// GetResponseHeader for get response http header +func GetResponseHeader(respHeader *http.Header) Option { + return addArg(responseHeader, respHeader) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set x-oss-process param +func Process(value string) Option { + return addParam("x-oss-process", value) +} + +// TrafficLimitParam is a option to set x-oss-traffic-limit +func TrafficLimitParam(value int64) Option { + return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10)) +} + +// SetHeader Allow users to set personalized http headers +func SetHeader(key string, value interface{}) Option { + return setHeader(key, value) +} + +// AddParam Allow users to set personalized http params +func AddParam(key string, value interface{}) Option { + return addParam(key, value) +} + +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func GetRawParams(options []Option) (map[string]interface{}, error) { + // Option + params := map[string]optionValue{} + for _, option := range options { + if option != 
nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // Serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +} + +func FindOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := params[param]; ok { + return val.Value, nil + } + return defaultVal, nil +} + +func IsOptionSet(options []Option, option string) (bool, interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return false, nil, err + } + } + } + + if val, ok := params[option]; ok { + return true, val.Value, nil + } + return false, nil, nil +} + +func DeleteOption(options []Option, strKey string) []Option { + var outOption []Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + option(params) + _, exist := params[strKey] + if !exist { + outOption = append(outOption, option) + } else { + delete(params, strKey) + } + } + } + return outOption +} + +func GetRequestId(header http.Header) string { + return header.Get("x-oss-request-id") +} + +func GetVersionId(header http.Header) string { + return header.Get("x-oss-version-id") +} + +func GetCopySrcVersionId(header http.Header) string { + return header.Get("x-oss-copy-source-version-id") +} + +func GetDeleteMark(header http.Header) bool { + value := header.Get("x-oss-delete-marker") + if strings.ToUpper(value) == "TRUE" { + return true + } + return false +} + +func GetQosDelayTime(header http.Header) string { + return header.Get("x-oss-qos-delay-time") +} + +// ForbidOverWrite is an option to set X-Oss-Forbid-Overwrite +func AllowSameActionOverLap(enabled bool) Option { + if enabled { + return setHeader(HTTPHeaderAllowSameActionOverLap, "true") + } else { + return setHeader(HTTPHeaderAllowSameActionOverLap, "false") + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go new file mode 100644 index 000000000000..9f3aa9f614c5 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go @@ -0,0 +1,116 @@ +package oss + +import ( + "io" +) + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType ProgressEventType +} + +// ProgressListener listens progress change +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func 
publishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +type readerTracker struct { + completedBytes int64 +} + +type teeReader struct { + reader io.Reader + writer io.Writer + listener ProgressListener + consumedBytes int64 + totalBytes int64 + tracker *readerTracker +} + +// TeeReader returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser { + return &teeReader{ + reader: reader, + writer: writer, + listener: listener, + consumedBytes: 0, + totalBytes: totalBytes, + tracker: tracker, + } +} + +func (t *teeReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + + // Read encountered error + if err != nil && err != io.EOF { + event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0) + publishProgress(t.listener, event) + } + + if n > 0 { + t.consumedBytes += int64(n) + // CRC + if t.writer != nil { + if n, err := t.writer.Write(p[:n]); err != nil { + return n, err + } + } + // Progress + if t.listener != nil { + event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n)) + publishProgress(t.listener, event) + } + // Track + if t.tracker != nil { + t.tracker.completedBytes = t.consumedBytes + } + } + + return +} + +func (t *teeReader) Close() error { + if rc, ok := t.reader.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go new file mode 100644 index 000000000000..d09bc5ebd37f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_6.go @@ -0,0 +1,11 @@ +// +build !go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward + +func disableHTTPRedirect(client *http.Client) { + +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go new file mode 100644 index 000000000000..5b0bb8674e25 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/redirect_1_7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package oss + +import "net/http" + +// http.ErrUseLastResponse only is defined go1.7 onward +func disableHTTPRedirect(client *http.Client) { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go new file mode 100644 index 000000000000..2e0da4637f19 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object.go @@ -0,0 +1,197 @@ +package oss + +import ( + "bytes" + "encoding/xml" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// CreateSelectCsvObjectMeta is Creating csv object meta +// +// key the object key. +// csvMeta the csv file meta +// options the options for create csv Meta of the object. +// +// MetaEndFrameCSV the csv file meta info +// error it's nil if no error, otherwise it's an error object. 
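+// A minimal sketch ("sample.csv" is a placeholder; a zero-value CsvMetaRequest
+// is assumed to let OSS detect the CSV meta):
+//
+//	metaRes, err := bucket.CreateSelectCsvObjectMeta("sample.csv", CsvMetaRequest{})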
+// +func (bucket Bucket) CreateSelectCsvObjectMeta(key string, csvMeta CsvMetaRequest, options ...Option) (MetaEndFrameCSV, error) { + var endFrame MetaEndFrameCSV + params := map[string]interface{}{} + params["x-oss-process"] = "csv/meta" + + csvMeta.encodeBase64() + bs, err := xml.Marshal(csvMeta) + if err != nil { + return endFrame, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return endFrame, err + } + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp) + + return resp.Frame.MetaEndFrameCSV, err +} + +// CreateSelectJsonObjectMeta is Creating json object meta +// +// key the object key. +// csvMeta the json file meta +// options the options for create json Meta of the object. +// +// MetaEndFrameJSON the json file meta info +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CreateSelectJsonObjectMeta(key string, jsonMeta JsonMetaRequest, options ...Option) (MetaEndFrameJSON, error) { + var endFrame MetaEndFrameJSON + params := map[string]interface{}{} + params["x-oss-process"] = "json/meta" + + bs, err := xml.Marshal(jsonMeta) + if err != nil { + return endFrame, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return endFrame, err + } + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp) + + return resp.Frame.MetaEndFrameJSON, err +} + +// SelectObject is the select object api, approve csv and json file. +// +// key the object key. +// selectReq the request data for select object +// options the options for select file of the object. +// +// o.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SelectObject(key string, selectReq SelectRequest, options ...Option) (io.ReadCloser, error) { + params := map[string]interface{}{} + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + params["x-oss-process"] = "csv/select" // default select csv file + } else { + params["x-oss-process"] = "json/select" + } + selectReq.encodeBase64() + bs, err := xml.Marshal(selectReq) + if err != nil { + return nil, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return nil, err + } + if selectReq.OutputSerializationSelect.EnablePayloadCrc != nil && *selectReq.OutputSerializationSelect.EnablePayloadCrc == true { + resp.Frame.EnablePayloadCrc = true + } + resp.Frame.OutputRawData = strings.ToUpper(resp.Headers.Get("x-oss-select-output-raw")) == "TRUE" + + return resp, err +} + +// DoPostSelectObject is the SelectObject/CreateMeta api, approve csv and json file. +// +// key the object key. +// params the resource of oss approve csv/meta, json/meta, csv/select, json/select. +// buf the request data trans to buffer. +// options the options for select file of the object. +// +// SelectObjectResponse the response of select object. +// error it's nil if no error, otherwise it's an error object. 
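+// Note: SelectObject, SelectObjectIntoFile and the CreateSelect*ObjectMeta helpers
+// in this file wrap this call; most callers will not need to invoke it directly.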
+// +func (bucket Bucket) DoPostSelectObject(key string, params map[string]interface{}, buf *bytes.Buffer, options ...Option) (*SelectObjectResponse, error) { + resp, err := bucket.do("POST", key, params, options, buf, nil) + if err != nil { + return nil, err + } + + result := &SelectObjectResponse{ + Body: resp.Body, + StatusCode: resp.StatusCode, + Frame: SelectObjectResult{}, + } + result.Headers = resp.Headers + // result.Frame = SelectObjectResult{} + result.ReadTimeOut = bucket.GetConfig().Timeout + + // Progress + listener := GetProgressListener(options) + + // CRC32 + crcCalc := crc32.NewIEEE() + result.WriterForCheckCrc32 = crcCalc + result.Body = TeeReader(resp.Body, nil, 0, listener, nil) + + err = CheckRespCode(resp.StatusCode, []int{http.StatusPartialContent, http.StatusOK}) + + return result, err +} + +// SelectObjectIntoFile is the selectObject to file api +// +// key the object key. +// fileName saving file's name to localstation. +// selectReq the request data for select object +// options the options for select file of the object. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SelectObjectIntoFile(key, fileName string, selectReq SelectRequest, options ...Option) error { + tempFilePath := fileName + TempFileSuffix + + params := map[string]interface{}{} + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + params["x-oss-process"] = "csv/select" // default select csv file + } else { + params["x-oss-process"] = "json/select" + } + selectReq.encodeBase64() + bs, err := xml.Marshal(selectReq) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + resp, err := bucket.DoPostSelectObject(key, params, buffer, options...) + if err != nil { + return err + } + defer resp.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, resp) + fd.Close() + if err != nil { + return err + } + + return os.Rename(tempFilePath, fileName) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go new file mode 100644 index 000000000000..8b75782f3537 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/select_object_type.go @@ -0,0 +1,364 @@ +package oss + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "time" +) + +// The adapter class for Select object's response. +// The response consists of frames. Each frame has the following format: + +// Type | Payload Length | Header Checksum | Payload | Payload Checksum + +// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes---------> +// And we have three kind of frames. 
+// Data Frame: +// Type:8388609 +// Payload: Offset | Data +// <-8 bytes> + +// Continuous Frame +// Type:8388612 +// Payload: Offset (8-bytes) + +// End Frame +// Type:8388613 +// Payload: Offset | total scanned bytes | http status code | error message +// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe---> + +// SelectObjectResponse defines HTTP response from OSS SelectObject +type SelectObjectResponse struct { + StatusCode int + Headers http.Header + Body io.ReadCloser + Frame SelectObjectResult + ReadTimeOut uint + ClientCRC32 uint32 + ServerCRC32 uint32 + WriterForCheckCrc32 hash.Hash32 + Finish bool +} + +func (sr *SelectObjectResponse) Read(p []byte) (n int, err error) { + n, err = sr.readFrames(p) + return +} + +// Close http reponse body +func (sr *SelectObjectResponse) Close() error { + return sr.Body.Close() +} + +// PostSelectResult is the request of SelectObject +type PostSelectResult struct { + Response *SelectObjectResponse +} + +// readFrames is read Frame +func (sr *SelectObjectResponse) readFrames(p []byte) (int, error) { + var nn int + var err error + var checkValid bool + if sr.Frame.OutputRawData == true { + nn, err = sr.Body.Read(p) + return nn, err + } + + if sr.Finish { + return 0, io.EOF + } + + for { + // if this Frame is Readed, then not reading Header + if sr.Frame.OpenLine != true { + err = sr.analysisHeader() + if err != nil { + return nn, err + } + } + + if sr.Frame.FrameType == DataFrameType { + n, err := sr.analysisData(p[nn:]) + if err != nil { + return nn, err + } + nn += n + + // if this Frame is readed all data, then empty the Frame to read it with next frame + if sr.Frame.ConsumedBytesLength == sr.Frame.PayloadLength-8 { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + sr.emptyFrame() + } + + if nn == len(p) { + return nn, nil + } + } else if sr.Frame.FrameType == ContinuousFrameType { + checkValid, err = sr.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + } else if sr.Frame.FrameType == EndFrameType { + err = sr.analysisEndFrame() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameCSVType { + err = sr.analysisMetaEndFrameCSV() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } else if sr.Frame.FrameType == MetaEndFrameJSONType { + err = sr.analysisMetaEndFrameJSON() + if err != nil { + return nn, err + } + checkValid, err = sr.checkPayloadSum() + if checkValid { + sr.Finish = true + } + return nn, err + } + } + return nn, nil +} + +type chanReadIO struct { + readLen int + err error +} + +func (sr *SelectObjectResponse) readLen(p []byte, timeOut time.Duration) (int, error) { + r := sr.Body + ch := make(chan chanReadIO, 1) + defer close(ch) + go func(p []byte) { + var needReadLength int + readChan := chanReadIO{} + needReadLength = len(p) + for { + n, err := r.Read(p[readChan.readLen:needReadLength]) + readChan.readLen += n + if err != nil { + readChan.err = err + ch <- readChan + return + } + + if readChan.readLen == needReadLength { + break + } + } + ch <- readChan + }(p) + + select { + case <-time.After(time.Second * timeOut): + return 0, fmt.Errorf("requestId: %s, readLen timeout, timeout is %d(second),need read:%d", sr.Headers.Get(HTTPHeaderOssRequestID), timeOut, len(p)) + case result 
:= <-ch: + return result.readLen, result.err + } +} + +// analysisHeader is reading selectObject response body's header +func (sr *SelectObjectResponse) analysisHeader() error { + headFrameByte := make([]byte, 20) + _, err := sr.readLen(headFrameByte, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("requestId: %s, Read response frame header failure,err:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error()) + } + + frameTypeByte := headFrameByte[0:4] + sr.Frame.Version = frameTypeByte[0] + frameTypeByte[0] = 0 + bytesToInt(frameTypeByte, &sr.Frame.FrameType) + + if sr.Frame.FrameType != DataFrameType && sr.Frame.FrameType != ContinuousFrameType && + sr.Frame.FrameType != EndFrameType && sr.Frame.FrameType != MetaEndFrameCSVType && sr.Frame.FrameType != MetaEndFrameJSONType { + return fmt.Errorf("requestId: %s, Unexpected frame type: %d", sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType) + } + + payloadLengthByte := headFrameByte[4:8] + bytesToInt(payloadLengthByte, &sr.Frame.PayloadLength) + headCheckSumByte := headFrameByte[8:12] + bytesToInt(headCheckSumByte, &sr.Frame.HeaderCheckSum) + byteOffset := headFrameByte[12:20] + bytesToInt(byteOffset, &sr.Frame.Offset) + sr.Frame.OpenLine = true + + err = sr.writerCheckCrc32(byteOffset) + return err +} + +// analysisData is reading the DataFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisData(p []byte) (int, error) { + var needReadLength int32 + lenP := int32(len(p)) + restByteLength := sr.Frame.PayloadLength - 8 - sr.Frame.ConsumedBytesLength + if lenP <= restByteLength { + needReadLength = lenP + } else { + needReadLength = restByteLength + } + n, err := sr.readLen(p[:needReadLength], time.Duration(sr.ReadTimeOut)) + if err != nil { + return n, fmt.Errorf("read frame data error,%s", err.Error()) + } + sr.Frame.ConsumedBytesLength += int32(n) + err = sr.writerCheckCrc32(p[:n]) + return n, err +} + +// analysisEndFrame is reading the EndFrameType data of selectObject response body +func (sr *SelectObjectResponse) analysisEndFrame() error { + var eF EndFrame + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read end frame error:%s", err.Error()) + } + bytesToInt(payLoadBytes[0:8], &eF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &eF.HTTPStatusCode) + errMsgLength := sr.Frame.PayloadLength - 20 + eF.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12]) + sr.Frame.EndFrame.TotalScanned = eF.TotalScanned + sr.Frame.EndFrame.HTTPStatusCode = eF.HTTPStatusCode + sr.Frame.EndFrame.ErrorMsg = eF.ErrorMsg + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body +func (sr *SelectObjectResponse) analysisMetaEndFrameCSV() error { + var mCF MetaEndFrameCSV + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read meta end csv frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &mCF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &mCF.Status) + bytesToInt(payLoadBytes[12:16], &mCF.SplitsCount) + bytesToInt(payLoadBytes[16:24], &mCF.RowsCount) + bytesToInt(payLoadBytes[24:28], &mCF.ColumnsCount) + errMsgLength := sr.Frame.PayloadLength - 36 + mCF.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28]) + sr.Frame.MetaEndFrameCSV.ErrorMsg = mCF.ErrorMsg + 
sr.Frame.MetaEndFrameCSV.TotalScanned = mCF.TotalScanned + sr.Frame.MetaEndFrameCSV.Status = mCF.Status + sr.Frame.MetaEndFrameCSV.SplitsCount = mCF.SplitsCount + sr.Frame.MetaEndFrameCSV.RowsCount = mCF.RowsCount + sr.Frame.MetaEndFrameCSV.ColumnsCount = mCF.ColumnsCount + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameJSON is reading the MetaEndFrameJSONType data of selectObject response body +func (sr *SelectObjectResponse) analysisMetaEndFrameJSON() error { + var mJF MetaEndFrameJSON + payLoadBytes := make([]byte, sr.Frame.PayloadLength-8) + _, err := sr.readLen(payLoadBytes, time.Duration(sr.ReadTimeOut)) + if err != nil { + return fmt.Errorf("read meta end json frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &mJF.TotalScanned) + bytesToInt(payLoadBytes[8:12], &mJF.Status) + bytesToInt(payLoadBytes[12:16], &mJF.SplitsCount) + bytesToInt(payLoadBytes[16:24], &mJF.RowsCount) + errMsgLength := sr.Frame.PayloadLength - 32 + mJF.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24]) + sr.Frame.MetaEndFrameJSON.ErrorMsg = mJF.ErrorMsg + sr.Frame.MetaEndFrameJSON.TotalScanned = mJF.TotalScanned + sr.Frame.MetaEndFrameJSON.Status = mJF.Status + sr.Frame.MetaEndFrameJSON.SplitsCount = mJF.SplitsCount + sr.Frame.MetaEndFrameJSON.RowsCount = mJF.RowsCount + + err = sr.writerCheckCrc32(payLoadBytes) + return err +} + +func (sr *SelectObjectResponse) checkPayloadSum() (bool, error) { + payLoadChecksumByte := make([]byte, 4) + n, err := sr.readLen(payLoadChecksumByte, time.Duration(sr.ReadTimeOut)) + if n == 4 { + bytesToInt(payLoadChecksumByte, &sr.Frame.PayloadChecksum) + sr.ServerCRC32 = sr.Frame.PayloadChecksum + sr.ClientCRC32 = sr.WriterForCheckCrc32.Sum32() + if sr.Frame.EnablePayloadCrc == true && sr.ServerCRC32 != 0 && sr.ServerCRC32 != sr.ClientCRC32 { + return false, fmt.Errorf("RequestId: %s, Unexpected frame type: %d, client %d but server %d", + sr.Headers.Get(HTTPHeaderOssRequestID), sr.Frame.FrameType, sr.ClientCRC32, sr.ServerCRC32) + } + return true, err + } + return false, fmt.Errorf("RequestId:%s, read checksum error:%s", sr.Headers.Get(HTTPHeaderOssRequestID), err.Error()) +} + +func (sr *SelectObjectResponse) writerCheckCrc32(p []byte) (err error) { + err = nil + if sr.Frame.EnablePayloadCrc == true { + _, err = sr.WriterForCheckCrc32.Write(p) + } + return err +} + +// emptyFrame is emptying SelectObjectResponse Frame information +func (sr *SelectObjectResponse) emptyFrame() { + crcCalc := crc32.NewIEEE() + sr.WriterForCheckCrc32 = crcCalc + sr.Finish = false + + sr.Frame.ConsumedBytesLength = 0 + sr.Frame.OpenLine = false + sr.Frame.Version = byte(0) + sr.Frame.FrameType = 0 + sr.Frame.PayloadLength = 0 + sr.Frame.HeaderCheckSum = 0 + sr.Frame.Offset = 0 + sr.Frame.Data = "" + + sr.Frame.EndFrame.TotalScanned = 0 + sr.Frame.EndFrame.HTTPStatusCode = 0 + sr.Frame.EndFrame.ErrorMsg = "" + + sr.Frame.MetaEndFrameCSV.TotalScanned = 0 + sr.Frame.MetaEndFrameCSV.Status = 0 + sr.Frame.MetaEndFrameCSV.SplitsCount = 0 + sr.Frame.MetaEndFrameCSV.RowsCount = 0 + sr.Frame.MetaEndFrameCSV.ColumnsCount = 0 + sr.Frame.MetaEndFrameCSV.ErrorMsg = "" + + sr.Frame.MetaEndFrameJSON.TotalScanned = 0 + sr.Frame.MetaEndFrameJSON.Status = 0 + sr.Frame.MetaEndFrameJSON.SplitsCount = 0 + sr.Frame.MetaEndFrameJSON.RowsCount = 0 + sr.Frame.MetaEndFrameJSON.ErrorMsg = "" + + sr.Frame.PayloadChecksum = 0 +} + +// bytesToInt byte's array trans to int +func bytesToInt(b []byte, ret interface{}) { + binBuf := bytes.NewBuffer(b) + binary.Read(binBuf, 
binary.BigEndian, ret) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go new file mode 100644 index 000000000000..4fb8b1741b3f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go new file mode 100644 index 000000000000..2fae124e8ea0 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go @@ -0,0 +1,43 @@ +// +build go1.7 + +package oss + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + d := net.Dialer{ + Timeout: httpTimeOut.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if config.LocalAddr != nil { + d.LocalAddr = config.LocalAddr + } + conn, err := d.Dial(netw, addr) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConns: httpMaxConns.MaxIdleConns, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + IdleConnTimeout: httpTimeOut.IdleConnTimeout, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + + if config.InsecureSkipVerify { + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go new file mode 100644 index 000000000000..73d54c5f22f2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go @@ -0,0 +1,1262 @@ +package oss + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "net/url" + "time" +) + +// ListBucketsResult defines the result object from ListBuckets request +type ListBucketsResult struct { + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Prefix string `xml:"Prefix"` // The prefix in this query + Marker string `xml:"Marker"` // The marker filter + MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true. + IsTruncated bool `xml:"IsTruncated"` // Flag true means there's remaining buckets to return. 
+ NextMarker string `xml:"NextMarker"` // The marker filter for the next list call + Owner Owner `xml:"Owner"` // The owner information + Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list +} + +// BucketProperties defines bucket properties +type BucketProperties struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket create time + StorageClass string `xml:"StorageClass"` // Bucket storage class +} + +// GetBucketACLResult defines GetBucketACL request's result +type GetBucketACLResult struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + Owner Owner `xml:"Owner"` // Bucket owner +} + +// LifecycleConfiguration is the Bucket Lifecycle configuration +type LifecycleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + Rules []LifecycleRule `xml:"Rule"` +} + +// LifecycleRule defines Lifecycle rules +type LifecycleRule struct { + XMLName xml.Name `xml:"Rule"` + ID string `xml:"ID,omitempty"` // The rule ID + Prefix string `xml:"Prefix"` // The object key prefix + Status string `xml:"Status"` // The rule status (enabled or not) + Tags []Tag `xml:"Tag,omitempty"` // the tags property + Expiration *LifecycleExpiration `xml:"Expiration,omitempty"` // The expiration property + Transitions []LifecycleTransition `xml:"Transition,omitempty"` // The transition property + AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property + NonVersionExpiration *LifecycleVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` + // Deprecated: Use NonVersionTransitions instead. 
+ NonVersionTransition *LifecycleVersionTransition `xml:"-"` // NonVersionTransition is not suggested to use + NonVersionTransitions []LifecycleVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` +} + +// LifecycleExpiration defines the rule's expiration property +type LifecycleExpiration struct { + XMLName xml.Name `xml:"Expiration"` + Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time + Date string `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date, not recommended + CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired + ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"` // Specifies whether the expired delete tag is automatically deleted +} + +// LifecycleTransition defines the rule's transition propery +type LifecycleTransition struct { + XMLName xml.Name `xml:"Transition"` + Days int `xml:"Days,omitempty"` // Relative transition time: The transition time in days after the last modified time + CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired + StorageClass StorageClassType `xml:"StorageClass,omitempty"` // Specifies the target storage type +} + +// LifecycleAbortMultipartUpload defines the rule's abort multipart upload propery +type LifecycleAbortMultipartUpload struct { + XMLName xml.Name `xml:"AbortMultipartUpload"` + Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time + CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // objects created before the date will be expired +} + +// LifecycleVersionExpiration defines the rule's NoncurrentVersionExpiration propery +type LifecycleVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration"` + NoncurrentDays int `xml:"NoncurrentDays,omitempty"` // How many days after the Object becomes a non-current version +} + +// LifecycleVersionTransition defines the rule's NoncurrentVersionTransition propery +type LifecycleVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition"` + NoncurrentDays int `xml:"NoncurrentDays,omitempty"` // How many days after the Object becomes a non-current version + StorageClass StorageClassType `xml:"StorageClass,omitempty"` +} + +const iso8601DateFormat = "2006-01-02T15:04:05.000Z" + +// BuildLifecycleRuleByDays builds a lifecycle rule objects will expiration in days after the last modified time +func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule { + var statusStr = "Enabled" + if !status { + statusStr = "Disabled" + } + return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, + Expiration: &LifecycleExpiration{Days: days}} +} + +// BuildLifecycleRuleByDate builds a lifecycle rule objects will expiration in specified date +func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule { + var statusStr = "Enabled" + if !status { + statusStr = "Disabled" + } + date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat) + return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, + Expiration: &LifecycleExpiration{Date: date}} +} + +// ValidateLifecycleRule Determine if a lifecycle rule is valid, if it is invalid, it will return an error. 
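+//
+// A hedged usage sketch (BuildLifecycleRuleByDays is defined above; client.SetBucketLifecycle is
+// assumed to be the client method, defined elsewhere in this SDK, that applies the rules):
+//
+//	rule := BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30)
+//	if err := verifyLifecycleRules([]LifecycleRule{rule}); err != nil {
+//		// the rule is rejected locally before it is ever sent to the service
+//	}
+//	// err = client.SetBucketLifecycle("my-bucket", []LifecycleRule{rule})
+//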
+func verifyLifecycleRules(rules []LifecycleRule) error { + if len(rules) == 0 { + return fmt.Errorf("invalid rules, the length of rules is zero") + } + for k, rule := range rules { + if rule.Status != "Enabled" && rule.Status != "Disabled" { + return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled") + } + + abortMPU := rule.AbortMultipartUpload + if abortMPU != nil { + if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") { + return fmt.Errorf("invalid abort multipart upload lifecycle, must be set one of CreatedBeforeDate and Days") + } + } + + transitions := rule.Transitions + if len(transitions) > 0 { + for _, transition := range transitions { + if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") { + return fmt.Errorf("invalid transition lifecycle, must be set one of CreatedBeforeDate and Days") + } + } + } + + // NonVersionTransition is not suggested to use + // to keep compatible + if rule.NonVersionTransition != nil && len(rule.NonVersionTransitions) > 0 { + return fmt.Errorf("NonVersionTransition and NonVersionTransitions cannot both have values") + } else if rule.NonVersionTransition != nil { + rules[k].NonVersionTransitions = append(rules[k].NonVersionTransitions, *rule.NonVersionTransition) + } + } + + return nil +} + +// GetBucketLifecycleResult defines GetBucketLifecycle's result object +type GetBucketLifecycleResult LifecycleConfiguration + +// RefererXML defines Referer configuration +type RefererXML struct { + XMLName xml.Name `xml:"RefererConfiguration"` + AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer + RefererList []string `xml:"RefererList>Referer"` // Referer whitelist +} + +// GetBucketRefererResult defines result object for GetBucketReferer request +type GetBucketRefererResult RefererXML + +// LoggingXML defines logging configuration +type LoggingXML struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information +} + +type loggingXMLEmpty struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` +} + +// LoggingEnabled defines the logging configuration information +type LoggingEnabled struct { + XMLName xml.Name `xml:"LoggingEnabled"` + TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files + TargetPrefix string `xml:"TargetPrefix"` // The log file prefix +} + +// GetBucketLoggingResult defines the result from GetBucketLogging request +type GetBucketLoggingResult LoggingXML + +// WebsiteXML defines Website configuration +type WebsiteXML struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` // The index page + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` // The error page + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` // The routing Rule list +} + +// IndexDocument defines the index page info +type IndexDocument struct { + XMLName xml.Name `xml:"IndexDocument"` + Suffix string `xml:"Suffix"` // The file name for the index page +} + +// ErrorDocument defines the 404 error page info +type ErrorDocument struct { + XMLName xml.Name `xml:"ErrorDocument"` + Key string `xml:"Key"` // 404 error file name +} + +// RoutingRule defines the routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + RuleNumber int `xml:"RuleNumber,omitempty"` // The 
routing number + Condition Condition `xml:"Condition,omitempty"` // The routing condition + Redirect Redirect `xml:"Redirect,omitempty"` // The routing redirect + +} + +// Condition defines codition in the RoutingRule +type Condition struct { + XMLName xml.Name `xml:"Condition"` + KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"` // Matching objcet prefix + HTTPErrorCodeReturnedEquals int `xml:"HttpErrorCodeReturnedEquals,omitempty"` // The rule is for Accessing to the specified object + IncludeHeader []IncludeHeader `xml:"IncludeHeader"` // The rule is for request which include header +} + +// IncludeHeader defines includeHeader in the RoutingRule's Condition +type IncludeHeader struct { + XMLName xml.Name `xml:"IncludeHeader"` + Key string `xml:"Key,omitempty"` // The Include header key + Equals string `xml:"Equals,omitempty"` // The Include header value +} + +// Redirect defines redirect in the RoutingRule +type Redirect struct { + XMLName xml.Name `xml:"Redirect"` + RedirectType string `xml:"RedirectType,omitempty"` // The redirect type, it have Mirror,External,Internal,AliCDN + PassQueryString *bool `xml:"PassQueryString"` // Whether to send the specified request's parameters, true or false + MirrorURL string `xml:"MirrorURL,omitempty"` // Mirror of the website address back to the source. + MirrorPassQueryString *bool `xml:"MirrorPassQueryString"` // To Mirror of the website Whether to send the specified request's parameters, true or false + MirrorFollowRedirect *bool `xml:"MirrorFollowRedirect"` // Redirect the location, if the mirror return 3XX + MirrorCheckMd5 *bool `xml:"MirrorCheckMd5"` // Check the mirror is MD5. + MirrorHeaders MirrorHeaders `xml:"MirrorHeaders,omitempty"` // Mirror headers + Protocol string `xml:"Protocol,omitempty"` // The redirect Protocol + HostName string `xml:"HostName,omitempty"` // The redirect HostName + ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"` // object name'Prefix replace the value + HttpRedirectCode int `xml:"HttpRedirectCode,omitempty"` // THe redirect http code + ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` // object name replace the value +} + +// MirrorHeaders defines MirrorHeaders in the Redirect +type MirrorHeaders struct { + XMLName xml.Name `xml:"MirrorHeaders"` + PassAll *bool `xml:"PassAll"` // Penetrating all of headers to source website. + Pass []string `xml:"Pass"` // Penetrating some of headers to source website. + Remove []string `xml:"Remove"` // Prohibit passthrough some of headers to source website + Set []MirrorHeaderSet `xml:"Set"` // Setting some of headers send to source website +} + +// MirrorHeaderSet defines Set for Redirect's MirrorHeaders +type MirrorHeaderSet struct { + XMLName xml.Name `xml:"Set"` + Key string `xml:"Key,omitempty"` // The mirror header key + Value string `xml:"Value,omitempty"` // The mirror header value +} + +// GetBucketWebsiteResult defines the result from GetBucketWebsite request. +type GetBucketWebsiteResult WebsiteXML + +// CORSXML defines CORS configuration +type CORSXML struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []CORSRule `xml:"CORSRule"` // CORS rules +} + +// CORSRule defines CORS rules +type CORSRule struct { + XMLName xml.Name `xml:"CORSRule"` + AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. 
By default it's wildcard '*' + AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods + AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers + ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers + MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds +} + +// GetBucketCORSResult defines the result from GetBucketCORS request. +type GetBucketCORSResult CORSXML + +// GetBucketInfoResult defines the result from GetBucketInfo request. +type GetBucketInfoResult struct { + XMLName xml.Name `xml:"BucketInfo"` + BucketInfo BucketInfo `xml:"Bucket"` +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket creation time + ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint + IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + RedundancyType string `xml:"DataRedundancyType"` // Bucket DataRedundancyType + Owner Owner `xml:"Owner"` // Bucket owner + StorageClass string `xml:"StorageClass"` // Bucket storage class + SseRule SSERule `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + Versioning string `xml:"Versioning"` // Bucket Versioning + TransferAcceleration string `xml:"TransferAcceleration"` // bucket TransferAcceleration + CrossRegionReplication string `xml:"CrossRegionReplication"` // bucket CrossRegionReplication +} + +type SSERule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` // Bucket KMSMasterKeyID + SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"` // Bucket SSEAlgorithm + KMSDataEncryption string `xml:"KMSDataEncryption,omitempty"` //Bucket KMSDataEncryption +} + +// ListObjectsResult defines the result from ListObjects request +type ListObjectsResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + Marker string `xml:"Marker"` // The marker filter. 
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextMarker string `xml:"NextMarker"` // The start point of the next query + Objects []ObjectProperties `xml:"Contents"` // Object list + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter +} + +// ObjectProperties defines Objecct properties +type ObjectProperties struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` // Object key + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + Owner Owner `xml:"Owner"` // Object owner information + LastModified time.Time `xml:"LastModified"` // Object last modified time + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) +} + +// ListObjectsResultV2 defines the result from ListObjectsV2 request +type ListObjectsResultV2 struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + StartAfter string `xml:"StartAfter"` // the input StartAfter + ContinuationToken string `xml:"ContinuationToken"` // the input ContinuationToken + MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextContinuationToken string `xml:"NextContinuationToken"` // The start point of the next NextContinuationToken + Objects []ObjectProperties `xml:"Contents"` // Object list + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter +} + +// ListObjectVersionsResult defines the result from ListObjectVersions request +type ListObjectVersionsResult struct { + XMLName xml.Name `xml:"ListVersionsResult"` + Name string `xml:"Name"` // The Bucket Name + Owner Owner `xml:"Owner"` // The owner of bucket + Prefix string `xml:"Prefix"` // The object prefix + KeyMarker string `xml:"KeyMarker"` // The start marker filter. + VersionIdMarker string `xml:"VersionIdMarker"` // The start VersionIdMarker filter. 
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextKeyMarker string `xml:"NextKeyMarker"` // The start point of the next query + NextVersionIdMarker string `xml:"NextVersionIdMarker"` // The start point of the next query + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` // DeleteMarker list + ObjectVersions []ObjectVersionProperties `xml:"Version"` // version list +} + +type ObjectDeleteMarkerProperties struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is current version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Owner Owner `xml:"Owner"` // bucket owner element +} + +type ObjectVersionProperties struct { + XMLName xml.Name `xml:"Version"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is latest version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) + Owner Owner `xml:"Owner"` // bucket owner element +} + +// Owner defines Bucket/Object's owner +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` // Owner ID + DisplayName string `xml:"DisplayName"` // Owner's display name +} + +// CopyObjectResult defines result object of CopyObject +type CopyObjectResult struct { + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time `xml:"LastModified"` // New object's last modified time. + ETag string `xml:"ETag"` // New object's ETag +} + +// GetObjectACLResult defines result of GetObjectACL request +type GetObjectACLResult GetBucketACLResult + +type deleteXML struct { + XMLName xml.Name `xml:"Delete"` + Objects []DeleteObject `xml:"Object"` // Objects to delete + Quiet bool `xml:"Quiet"` // Flag of quiet mode. 
+} + +// DeleteObject defines the struct for deleting object +type DeleteObject struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` // Object name + VersionId string `xml:"VersionId,omitempty"` // Object VersionId +} + +// DeleteObjectsResult defines result of DeleteObjects request +type DeleteObjectsResult struct { + XMLName xml.Name + DeletedObjects []string // Deleted object key list +} + +// DeleteObjectsResult_inner defines result of DeleteObjects request +type DeleteObjectVersionsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjectsDetail []DeletedKeyInfo `xml:"Deleted"` // Deleted object detail info +} + +// DeleteKeyInfo defines object delete info +type DeletedKeyInfo struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` // Object key + VersionId string `xml:"VersionId"` // VersionId + DeleteMarker bool `xml:"DeleteMarker"` // Object DeleteMarker + DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` // Object DeleteMarkerVersionId +} + +// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request +type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` // Bucket name + Key string `xml:"Key"` // Object name to upload + UploadID string `xml:"UploadId"` // Generated UploadId +} + +// UploadPart defines the upload/copy part +type UploadPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` // Part number + ETag string `xml:"ETag"` // ETag value of the part's data +} + +type UploadParts []UploadPart + +func (slice UploadParts) Len() int { + return len(slice) +} + +func (slice UploadParts) Less(i, j int) bool { + return slice[i].PartNumber < slice[j].PartNumber +} + +func (slice UploadParts) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +// UploadPartCopyResult defines result object of multipart copy request. +type UploadPartCopyResult struct { + XMLName xml.Name `xml:"CopyPartResult"` + LastModified time.Time `xml:"LastModified"` // Last modified time + ETag string `xml:"ETag"` // ETag +} + +type completeMultipartUploadXML struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Part []UploadPart `xml:"Part"` +} + +// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest +type CompleteMultipartUploadResult struct { + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` // Object URL + Bucket string `xml:"Bucket"` // Bucket name + ETag string `xml:"ETag"` // Object ETag + Key string `xml:"Key"` // Object name +} + +// ListUploadedPartsResult defines result object of ListUploadedParts +type ListUploadedPartsResult struct { + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` // Bucket name + Key string `xml:"Key"` // Object name + UploadID string `xml:"UploadId"` // Upload ID + NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Next part number + MaxParts int `xml:"MaxParts"` // Max parts count + IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries returned.false: all entries returned. 
+ UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts +} + +// UploadedPart defines uploaded part +type UploadedPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` // Part number + LastModified time.Time `xml:"LastModified"` // Last modified time + ETag string `xml:"ETag"` // ETag cache + Size int `xml:"Size"` // Part size +} + +// ListMultipartUploadResult defines result object of ListMultipartUpload +type ListMultipartUploadResult struct { + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` // Bucket name + Delimiter string `xml:"Delimiter"` // Delimiter for grouping object. + Prefix string `xml:"Prefix"` // Object prefix + KeyMarker string `xml:"KeyMarker"` // Object key marker + UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker + NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned. + NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned. + MaxUploads int `xml:"MaxUploads"` // Max uploads to return + IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned. + Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted) + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list. +} + +// UncompletedUpload structure wraps an uncompleted upload task +type UncompletedUpload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` // Object name + UploadID string `xml:"UploadId"` // The UploadId + Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z +} + +// ProcessObjectResult defines result object of ProcessObject +type ProcessObjectResult struct { + Bucket string `json:"bucket"` + FileSize int `json:"fileSize"` + Object string `json:"object"` + Status string `json:"status"` +} + +// decodeDeleteObjectsResult decodes deleting objects result in URL encoding +func decodeDeleteObjectsResult(result *DeleteObjectVersionsResult) error { + var err error + for i := 0; i < len(result.DeletedObjectsDetail); i++ { + result.DeletedObjectsDetail[i].Key, err = url.QueryUnescape(result.DeletedObjectsDetail[i].Key) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectsResult decodes list objects result in URL encoding +func decodeListObjectsResult(result *ListObjectsResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Marker, err = url.QueryUnescape(result.Marker) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextMarker, err = url.QueryUnescape(result.NextMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectsResult decodes list objects result in URL encoding +func decodeListObjectsResultV2(result *ListObjectsResultV2) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.StartAfter, err = url.QueryUnescape(result.StartAfter) + if err != nil { + return err + } + result.Delimiter, err = 
url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextContinuationToken, err = url.QueryUnescape(result.NextContinuationToken) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectVersionsResult decodes list version objects result in URL encoding +func decodeListObjectVersionsResult(result *ListObjectVersionsResult) error { + var err error + + // decode:Delimiter + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + + // decode Prefix + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + + // decode KeyMarker + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + + // decode VersionIdMarker + result.VersionIdMarker, err = url.QueryUnescape(result.VersionIdMarker) + if err != nil { + return err + } + + // decode NextKeyMarker + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + + // decode NextVersionIdMarker + result.NextVersionIdMarker, err = url.QueryUnescape(result.NextVersionIdMarker) + if err != nil { + return err + } + + // decode CommonPrefixes + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + + // decode deleteMarker + for i := 0; i < len(result.ObjectDeleteMarkers); i++ { + result.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(result.ObjectDeleteMarkers[i].Key) + if err != nil { + return err + } + } + + // decode ObjectVersions + for i := 0; i < len(result.ObjectVersions); i++ { + result.ObjectVersions[i].Key, err = url.QueryUnescape(result.ObjectVersions[i].Key) + if err != nil { + return err + } + } + + return nil +} + +// decodeListUploadedPartsResult decodes +func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error { + var err error + result.Key, err = url.QueryUnescape(result.Key) + if err != nil { + return err + } + return nil +} + +// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding +func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Uploads); i++ { + result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// createBucketConfiguration defines the configuration for creating a bucket. 
+type createBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + StorageClass StorageClassType `xml:"StorageClass,omitempty"` + DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"` + ObjectHashFunction ObjecthashFuncType `xml:"ObjectHashFunction,omitempty"` +} + +// LiveChannelConfiguration defines the configuration for live-channel +type LiveChannelConfiguration struct { + XMLName xml.Name `xml:"LiveChannelConfiguration"` + Description string `xml:"Description,omitempty"` //Description of live-channel, up to 128 bytes + Status string `xml:"Status,omitempty"` //Specify the status of livechannel + Target LiveChannelTarget `xml:"Target"` //target configuration of live-channel + // use point instead of struct to avoid omit empty snapshot + Snapshot *LiveChannelSnapshot `xml:"Snapshot,omitempty"` //snapshot configuration of live-channel +} + +// LiveChannelTarget target configuration of live-channel +type LiveChannelTarget struct { + XMLName xml.Name `xml:"Target"` + Type string `xml:"Type"` //the type of object, only supports HLS + FragDuration int `xml:"FragDuration,omitempty"` //the length of each ts object (in seconds), in the range [1,100] + FragCount int `xml:"FragCount,omitempty"` //the number of ts objects in the m3u8 object, in the range of [1,100] + PlaylistName string `xml:"PlaylistName,omitempty"` //the name of m3u8 object, which must end with ".m3u8" and the length range is [6,128] +} + +// LiveChannelSnapshot snapshot configuration of live-channel +type LiveChannelSnapshot struct { + XMLName xml.Name `xml:"Snapshot"` + RoleName string `xml:"RoleName,omitempty"` //The role of snapshot operations, it sholud has write permission of DestBucket and the permission to send messages to the NotifyTopic. + DestBucket string `xml:"DestBucket,omitempty"` //Bucket the snapshots will be written to. should be the same owner as the source bucket. 
+ NotifyTopic string `xml:"NotifyTopic,omitempty"` //Topics of MNS for notifying users of high frequency screenshot operation results + Interval int `xml:"Interval,omitempty"` //interval of snapshots, threre is no snapshot if no I-frame during the interval time +} + +// CreateLiveChannelResult the result of crete live-channel +type CreateLiveChannelResult struct { + XMLName xml.Name `xml:"CreateLiveChannelResult"` + PublishUrls []string `xml:"PublishUrls>Url"` //push urls list + PlayUrls []string `xml:"PlayUrls>Url"` //play urls list +} + +// LiveChannelStat the result of get live-channel state +type LiveChannelStat struct { + XMLName xml.Name `xml:"LiveChannelStat"` + Status string `xml:"Status"` //Current push status of live-channel: Disabled,Live,Idle + ConnectedTime time.Time `xml:"ConnectedTime"` //The time when the client starts pushing, format: ISO8601 + RemoteAddr string `xml:"RemoteAddr"` //The ip address of the client + Video LiveChannelVideo `xml:"Video"` //Video stream information + Audio LiveChannelAudio `xml:"Audio"` //Audio stream information +} + +// LiveChannelVideo video stream information +type LiveChannelVideo struct { + XMLName xml.Name `xml:"Video"` + Width int `xml:"Width"` //Width (unit: pixels) + Height int `xml:"Height"` //Height (unit: pixels) + FrameRate int `xml:"FrameRate"` //FramRate + Bandwidth int `xml:"Bandwidth"` //Bandwidth (unit: B/s) +} + +// LiveChannelAudio audio stream information +type LiveChannelAudio struct { + XMLName xml.Name `xml:"Audio"` + SampleRate int `xml:"SampleRate"` //SampleRate + Bandwidth int `xml:"Bandwidth"` //Bandwidth (unit: B/s) + Codec string `xml:"Codec"` //Encoding forma +} + +// LiveChannelHistory the result of GetLiveChannelHistory, at most return up to lastest 10 push records +type LiveChannelHistory struct { + XMLName xml.Name `xml:"LiveChannelHistory"` + Record []LiveRecord `xml:"LiveRecord"` //push records list +} + +// LiveRecord push recode +type LiveRecord struct { + XMLName xml.Name `xml:"LiveRecord"` + StartTime time.Time `xml:"StartTime"` //StartTime, format: ISO8601 + EndTime time.Time `xml:"EndTime"` //EndTime, format: ISO8601 + RemoteAddr string `xml:"RemoteAddr"` //The ip address of remote client +} + +// ListLiveChannelResult the result of ListLiveChannel +type ListLiveChannelResult struct { + XMLName xml.Name `xml:"ListLiveChannelResult"` + Prefix string `xml:"Prefix"` //Filter by the name start with the value of "Prefix" + Marker string `xml:"Marker"` //cursor from which starting list + MaxKeys int `xml:"MaxKeys"` //The maximum count returned. the default value is 100. it cannot be greater than 1000. 
+ IsTruncated bool `xml:"IsTruncated"` //Indicates whether all results have been returned, "true" indicates partial results returned while "false" indicates all results have been returned + NextMarker string `xml:"NextMarker"` //NextMarker indicate the Marker value of the next request + LiveChannel []LiveChannelInfo `xml:"LiveChannel"` //The infomation of live-channel +} + +// LiveChannelInfo the infomation of live-channel +type LiveChannelInfo struct { + XMLName xml.Name `xml:"LiveChannel"` + Name string `xml:"Name"` //The name of live-channel + Description string `xml:"Description"` //Description of live-channel + Status string `xml:"Status"` //Status: disabled or enabled + LastModified time.Time `xml:"LastModified"` //Last modification time, format: ISO8601 + PublishUrls []string `xml:"PublishUrls>Url"` //push urls list + PlayUrls []string `xml:"PlayUrls>Url"` //play urls list +} + +// Tag a tag for the object +type Tag struct { + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// Tagging tagset for the object +type Tagging struct { + XMLName xml.Name `xml:"Tagging"` + Tags []Tag `xml:"TagSet>Tag,omitempty"` +} + +// for GetObjectTagging return value +type GetObjectTaggingResult Tagging + +// VersioningConfig for the bucket +type VersioningConfig struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` +} + +type GetBucketVersioningResult VersioningConfig + +// Server Encryption rule for the bucket +type ServerEncryptionRule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` + SSEDefault SSEDefaultRule `xml:"ApplyServerSideEncryptionByDefault"` +} + +// Server Encryption deafult rule for the bucket +type SSEDefaultRule struct { + XMLName xml.Name `xml:"ApplyServerSideEncryptionByDefault"` + SSEAlgorithm string `xml:"SSEAlgorithm,omitempty"` + KMSMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` + KMSDataEncryption string `xml:"KMSDataEncryption,,omitempty"` +} + +type GetBucketEncryptionResult ServerEncryptionRule +type GetBucketTaggingResult Tagging + +type BucketStat struct { + XMLName xml.Name `xml:"BucketStat"` + Storage int64 `xml:"Storage"` + ObjectCount int64 `xml:"ObjectCount"` + MultipartUploadCount int64 `xml:"MultipartUploadCount"` +} +type GetBucketStatResult BucketStat + +// RequestPaymentConfiguration define the request payment configuration +type RequestPaymentConfiguration struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + Payer string `xml:"Payer,omitempty"` +} + +// BucketQoSConfiguration define QoS configuration +type BucketQoSConfiguration struct { + XMLName xml.Name `xml:"QoSConfiguration"` + TotalUploadBandwidth *int `xml:"TotalUploadBandwidth"` // Total upload bandwidth + IntranetUploadBandwidth *int `xml:"IntranetUploadBandwidth"` // Intranet upload bandwidth + ExtranetUploadBandwidth *int `xml:"ExtranetUploadBandwidth"` // Extranet upload bandwidth + TotalDownloadBandwidth *int `xml:"TotalDownloadBandwidth"` // Total download bandwidth + IntranetDownloadBandwidth *int `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth + ExtranetDownloadBandwidth *int `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth + TotalQPS *int `xml:"TotalQps"` // Total Qps + IntranetQPS *int `xml:"IntranetQps"` // Intranet Qps + ExtranetQPS *int `xml:"ExtranetQps"` // Extranet Qps +} + +// UserQoSConfiguration define QoS and Range configuration +type UserQoSConfiguration struct { + XMLName xml.Name `xml:"QoSConfiguration"` + Region string 
`xml:"Region,omitempty"` // Effective area of Qos configuration + BucketQoSConfiguration +} + +////////////////////////////////////////////////////////////// +/////////////////// Select OBject //////////////////////////// +////////////////////////////////////////////////////////////// + +type CsvMetaRequest struct { + XMLName xml.Name `xml:"CsvMetaRequest"` + InputSerialization InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"` +} + +// encodeBase64 encode base64 of the CreateSelectObjectMeta api request params +func (meta *CsvMetaRequest) encodeBase64() { + meta.InputSerialization.CSV.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.RecordDelimiter)) + meta.InputSerialization.CSV.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.FieldDelimiter)) + meta.InputSerialization.CSV.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(meta.InputSerialization.CSV.QuoteCharacter)) +} + +type JsonMetaRequest struct { + XMLName xml.Name `xml:"JsonMetaRequest"` + InputSerialization InputSerialization `xml:"InputSerialization"` + OverwriteIfExists *bool `xml:"OverwriteIfExists,omitempty"` +} + +type InputSerialization struct { + XMLName xml.Name `xml:"InputSerialization"` + CSV CSV `xml:CSV,omitempty` + JSON JSON `xml:JSON,omitempty` + CompressionType string `xml:"CompressionType,omitempty"` +} +type CSV struct { + XMLName xml.Name `xml:"CSV"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` +} + +type JSON struct { + XMLName xml.Name `xml:"JSON"` + JSONType string `xml:"Type,omitempty"` +} + +// SelectRequest is for the SelectObject request params of json file +type SelectRequest struct { + XMLName xml.Name `xml:"SelectRequest"` + Expression string `xml:"Expression"` + InputSerializationSelect InputSerializationSelect `xml:"InputSerialization"` + OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"` + SelectOptions SelectOptions `xml:"Options,omitempty"` +} +type InputSerializationSelect struct { + XMLName xml.Name `xml:"InputSerialization"` + CsvBodyInput CSVSelectInput `xml:CSV,omitempty` + JsonBodyInput JSONSelectInput `xml:JSON,omitempty` + CompressionType string `xml:"CompressionType,omitempty"` +} +type CSVSelectInput struct { + XMLName xml.Name `xml:"CSV"` + FileHeaderInfo string `xml:"FileHeaderInfo,omitempty"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` + QuoteCharacter string `xml:"QuoteCharacter,omitempty"` + CommentCharacter string `xml:"CommentCharacter,omitempty"` + Range string `xml:"Range,omitempty"` + SplitRange string +} +type JSONSelectInput struct { + XMLName xml.Name `xml:"JSON"` + JSONType string `xml:"Type,omitempty"` + Range string `xml:"Range,omitempty"` + ParseJSONNumberAsString *bool `xml:"ParseJsonNumberAsString"` + SplitRange string +} + +func (jsonInput *JSONSelectInput) JsonIsEmpty() bool { + if jsonInput.JSONType != "" { + return false + } + return true +} + +type OutputSerializationSelect struct { + XMLName xml.Name `xml:"OutputSerialization"` + CsvBodyOutput CSVSelectOutput `xml:CSV,omitempty` + JsonBodyOutput JSONSelectOutput `xml:JSON,omitempty` + OutputRawData *bool `xml:"OutputRawData,omitempty"` + KeepAllColumns *bool `xml:"KeepAllColumns,omitempty"` + EnablePayloadCrc *bool 
`xml:"EnablePayloadCrc,omitempty"` + OutputHeader *bool `xml:"OutputHeader,omitempty"` +} +type CSVSelectOutput struct { + XMLName xml.Name `xml:"CSV"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` + FieldDelimiter string `xml:"FieldDelimiter,omitempty"` +} +type JSONSelectOutput struct { + XMLName xml.Name `xml:"JSON"` + RecordDelimiter string `xml:"RecordDelimiter,omitempty"` +} + +func (selectReq *SelectRequest) encodeBase64() { + if selectReq.InputSerializationSelect.JsonBodyInput.JsonIsEmpty() { + selectReq.csvEncodeBase64() + } else { + selectReq.jsonEncodeBase64() + } +} + +// csvEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) csvEncodeBase64() { + selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression)) + selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter)) + selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter)) + selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter)) + selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter = + base64.StdEncoding.EncodeToString([]byte(selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter)) + selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.FieldDelimiter)) + selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.CsvBodyOutput.RecordDelimiter)) + + // handle Range + if selectReq.InputSerializationSelect.CsvBodyInput.Range != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.CsvBodyInput.Range + } + + if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange + } +} + +// jsonEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) jsonEncodeBase64() { + selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(selectReq.Expression)) + selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter)) + + // handle Range + if selectReq.InputSerializationSelect.JsonBodyInput.Range != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + selectReq.InputSerializationSelect.JsonBodyInput.Range + } + + if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = "split-range=" + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange + } +} + +// CsvOptions is a element in the SelectObject api request's params +type SelectOptions struct { + XMLName xml.Name `xml:"Options"` + SkipPartialDataRecord *bool `xml:"SkipPartialDataRecord,omitempty"` + MaxSkippedRecordsAllowed string `xml:"MaxSkippedRecordsAllowed,omitempty"` +} + +// SelectObjectResult is the SelectObject api's return +type SelectObjectResult struct { + Version byte + 
FrameType int32 + PayloadLength int32 + HeaderCheckSum uint32 + Offset uint64 + Data string // DataFrame + EndFrame EndFrame // EndFrame + MetaEndFrameCSV MetaEndFrameCSV // MetaEndFrameCSV + MetaEndFrameJSON MetaEndFrameJSON // MetaEndFrameJSON + PayloadChecksum uint32 + ReadFlagInfo +} + +// ReadFlagInfo if reading the frame data, recode the reading status +type ReadFlagInfo struct { + OpenLine bool + ConsumedBytesLength int32 + EnablePayloadCrc bool + OutputRawData bool +} + +// EndFrame is EndFrameType of SelectObject api +type EndFrame struct { + TotalScanned int64 + HTTPStatusCode int32 + ErrorMsg string +} + +// MetaEndFrameCSV is MetaEndFrameCSVType of CreateSelectObjectMeta +type MetaEndFrameCSV struct { + TotalScanned int64 + Status int32 + SplitsCount int32 + RowsCount int64 + ColumnsCount int32 + ErrorMsg string +} + +// MetaEndFrameJSON is MetaEndFrameJSON of CreateSelectObjectMeta +type MetaEndFrameJSON struct { + TotalScanned int64 + Status int32 + SplitsCount int32 + RowsCount int64 + ErrorMsg string +} + +// InventoryConfiguration is Inventory config +type InventoryConfiguration struct { + XMLName xml.Name `xml:"InventoryConfiguration"` + Id string `xml:"Id,omitempty"` + IsEnabled *bool `xml:"IsEnabled,omitempty"` + Prefix string `xml:"Filter>Prefix,omitempty"` + OSSBucketDestination OSSBucketDestination `xml:"Destination>OSSBucketDestination,omitempty"` + Frequency string `xml:"Schedule>Frequency,omitempty"` + IncludedObjectVersions string `xml:"IncludedObjectVersions,omitempty"` + OptionalFields OptionalFields `xml:OptionalFields,omitempty` +} + +type OptionalFields struct { + XMLName xml.Name `xml:"OptionalFields,omitempty` + Field []string `xml:"Field,omitempty` +} + +type OSSBucketDestination struct { + XMLName xml.Name `xml:"OSSBucketDestination"` + Format string `xml:"Format,omitempty"` + AccountId string `xml:"AccountId,omitempty"` + RoleArn string `xml:"RoleArn,omitempty"` + Bucket string `xml:"Bucket,omitempty"` + Prefix string `xml:"Prefix,omitempty"` + Encryption *InvEncryption `xml:"Encryption,omitempty"` +} + +type InvEncryption struct { + XMLName xml.Name `xml:"Encryption"` + SseOss *InvSseOss `xml:"SSE-OSS"` + SseKms *InvSseKms `xml:"SSE-KMS"` +} + +type InvSseOss struct { + XMLName xml.Name `xml:"SSE-OSS"` +} + +type InvSseKms struct { + XMLName xml.Name `xml:"SSE-KMS"` + KmsId string `xml:"KeyId,omitempty"` +} + +type ListInventoryConfigurationsResult struct { + XMLName xml.Name `xml:"ListInventoryConfigurationsResult"` + InventoryConfiguration []InventoryConfiguration `xml:"InventoryConfiguration,omitempty` + IsTruncated *bool `xml:"IsTruncated,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` +} + +// RestoreConfiguration for RestoreObject +type RestoreConfiguration struct { + XMLName xml.Name `xml:"RestoreRequest"` + Days int32 `xml:"Days,omitempty"` + Tier string `xml:"JobParameters>Tier,omitempty"` +} + +// AsyncFetchTaskConfiguration for SetBucketAsyncFetchTask +type AsyncFetchTaskConfiguration struct { + XMLName xml.Name `xml:"AsyncFetchTaskConfiguration"` + Url string `xml:"Url,omitempty"` + Object string `xml:"Object,omitempty"` + Host string `xml:"Host,omitempty"` + ContentMD5 string `xml:"ContentMD5,omitempty"` + Callback string `xml:"Callback,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + IgnoreSameKey bool `xml:"IgnoreSameKey"` +} + +// AsyncFetchTaskResult for SetBucketAsyncFetchTask result +type AsyncFetchTaskResult struct { + XMLName xml.Name `xml:"AsyncFetchTaskResult"` + TaskId string 
`xml:"TaskId,omitempty"` +} + +// AsynFetchTaskInfo for GetBucketAsyncFetchTask result +type AsynFetchTaskInfo struct { + XMLName xml.Name `xml:"AsyncFetchTaskInfo"` + TaskId string `xml:"TaskId,omitempty"` + State string `xml:"State,omitempty"` + ErrorMsg string `xml:"ErrorMsg,omitempty"` + TaskInfo AsyncTaskInfo `xml:"TaskInfo,omitempty"` +} + +// AsyncTaskInfo for async task information +type AsyncTaskInfo struct { + XMLName xml.Name `xml:"TaskInfo"` + Url string `xml:"Url,omitempty"` + Object string `xml:"Object,omitempty"` + Host string `xml:"Host,omitempty"` + ContentMD5 string `xml:"ContentMD5,omitempty"` + Callback string `xml:"Callback,omitempty"` + StorageClass string `xml:"StorageClass,omitempty"` + IgnoreSameKey bool `xml:"IgnoreSameKey"` +} + +// InitiateWormConfiguration define InitiateBucketWorm configuration +type InitiateWormConfiguration struct { + XMLName xml.Name `xml:"InitiateWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// ExtendWormConfiguration define ExtendWormConfiguration configuration +type ExtendWormConfiguration struct { + XMLName xml.Name `xml:"ExtendWormConfiguration"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days +} + +// WormConfiguration define WormConfiguration +type WormConfiguration struct { + XMLName xml.Name `xml:"WormConfiguration"` + WormId string `xml:"WormId,omitempty"` + State string `xml:"State,omitempty"` + RetentionPeriodInDays int `xml:"RetentionPeriodInDays"` // specify retention days + CreationDate string `xml:"CreationDate,omitempty"` +} + +// TransferAccConfiguration define transfer acceleration configuration +type TransferAccConfiguration struct { + XMLName xml.Name `xml:"TransferAccelerationConfiguration"` + Enabled bool `xml:"Enabled"` +} + +// ReplicationXML defines simple replication xml, and ReplicationXML is used for "DeleteBucketReplication" in client.go +type ReplicationXML struct { + XMLName xml.Name `xml:"ReplicationRules"` + ID string `xml:"ID,omitempty"` +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go new file mode 100644 index 000000000000..8b3ea09d2834 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go @@ -0,0 +1,552 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "time" +) + +// UploadFile is multipart file upload. +// +// objectKey the object name. +// filePath the local file path to upload. +// partSize the part size in byte. +// options the options for uploading object. +// +// error it's nil if the operation succeeds, otherwise it's an error object. 
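+//
+// A hedged usage sketch (the bucket handle and the Routines/Checkpoint option constructors are
+// assumed from other files of this SDK and are not defined in this file):
+//
+//	// upload /tmp/big.bin in 1 MiB parts, three parts in flight, resumable via a checkpoint file
+//	err := bucket.UploadFile("backup/big.bin", "/tmp/big.bin", 1024*1024,
+//		Routines(3), Checkpoint(true, "/tmp/big.bin.cp"))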
+// +func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (100KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey) + if cpFilePath != "" { + return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines) + } + } + + return bucket.uploadFile(objectKey, filePath, partSize, options, routines) +} + +func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + absPath, _ := filepath.Abs(srcFile) + cpFileName := getCpFileName(absPath, dest, "") + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- concurrent upload without checkpoint ----- + +// getCpConfig gets checkpoint configuration +func getCpConfig(options []Option) *cpConfig { + cpcOpt, err := FindOption(options, checkpointConfig, nil) + if err != nil || cpcOpt == nil { + return nil + } + + return cpcOpt.(*cpConfig) +} + +// getCpFileName return the name of the checkpoint file +func getCpFileName(src, dest, versionId string) string { + md5Ctx := md5.New() + md5Ctx.Write([]byte(src)) + srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + md5Ctx.Reset() + md5Ctx.Write([]byte(dest)) + destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + if versionId == "" { + return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum) + } + + md5Ctx.Reset() + md5Ctx.Write([]byte(versionId)) + versionCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + return fmt.Sprintf("%v-%v-%v.cp", srcCheckSum, destCheckSum, versionCheckSum) +} + +// getRoutines gets the routine count. by default it's 1. 
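+// A hedged note: the count normally comes from the Routines option (assumed to be defined in the
+// SDK's options file), e.g. bucket.UploadFile(key, path, partSize, Routines(5)); values outside
+// [1, 100] are clamped by the body below.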
+func getRoutines(options []Option) int { + rtnOpt, err := FindOption(options, routineNum, nil) + if err != nil || rtnOpt == nil { + return 1 + } + + rs := rtnOpt.(int) + if rs < 1 { + rs = 1 + } else if rs > 100 { + rs = 100 + } + + return rs +} + +// getPayer return the payer of the request +func getPayer(options []Option) string { + payerOpt, err := FindOption(options, HTTPHeaderOssRequester, nil) + if err != nil || payerOpt == nil { + return "" + } + return payerOpt.(string) +} + +// GetProgressListener gets the progress callback +func GetProgressListener(options []Option) ProgressListener { + isSet, listener, _ := IsOptionSet(options, progressListener) + if !isSet { + return nil + } + return listener.(ProgressListener) +} + +// uploadPartHook is for testing usage +type uploadPartHook func(id int, chunk FileChunk) error + +var uploadPartHooker uploadPartHook = defaultUploadPart + +func defaultUploadPart(id int, chunk FileChunk) error { + return nil +} + +// workerArg defines worker argument structure +type workerArg struct { + bucket *Bucket + filePath string + imur InitiateMultipartUploadResult + options []Option + hook uploadPartHook +} + +// worker is the worker coroutine function +type defaultUploadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultUploadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(id, chunk); err != nil { + failed <- err + break + } + var respHeader http.Header + p := Progress(&defaultUploadProgressListener{}) + opts := make([]Option, len(arg.options)+2) + opts = append(opts, arg.options...) + + // use defaultUploadProgressListener + opts = append(opts, p, GetResponseHeader(&respHeader)) + + startT := time.Now().UnixNano() / 1000 / 1000 / 1000 + part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, opts...) + endT := time.Now().UnixNano() / 1000 / 1000 / 1000 + if err != nil { + arg.bucket.Client.Config.WriteLog(Debug, "upload part error,cost:%d second,part number:%d,request id:%s,error:%s\n", endT-startT, chunk.Number, GetRequestId(respHeader), err.Error()) + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// scheduler function +func scheduler(jobs chan FileChunk, chunks []FileChunk) { + for _, chunk := range chunks { + jobs <- chunk + } + close(jobs) +} + +func getTotalBytes(chunks []FileChunk) int64 { + var tb int64 + for _, chunk := range chunks { + tb += chunk.Size + } + return tb +} + +// uploadFile is a concurrent upload, without checkpoint +func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { + listener := GetProgressListener(options) + + chunks, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + partOptions := ChoiceTransferPartOption(options) + completeOptions := ChoiceCompletePartOption(options) + abortOptions := ChoiceAbortPartOption(options) + + // Initialize the multipart upload + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) 
+ if err != nil { + return err + } + + jobs := make(chan FileChunk, len(chunks)) + results := make(chan UploadPart, len(chunks)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getTotalBytes(chunks) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start the worker coroutine + arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker} + for w := 1; w <= routines; w++ { + go worker(w, arg, jobs, results, failed, die) + } + + // Schedule the jobs + go scheduler(jobs, chunks) + + // Waiting for the upload finished + completed := 0 + parts := make([]UploadPart, len(chunks)) + for completed < len(chunks) { + select { + case part := <-results: + completed++ + parts[part.PartNumber-1] = part + completedBytes += chunks[part.PartNumber-1].Size + + // why RwBytes in ProgressEvent is 0 ? + // because read or write event has been notified in teeReader.Read() + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, chunks[part.PartNumber-1].Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + bucket.AbortMultipartUpload(imur, abortOptions...) + return err + } + + if completed >= len(chunks) { + break + } + } + + event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + // Complete the multpart upload + _, err = bucket.CompleteMultipartUpload(imur, parts, completeOptions...) + if err != nil { + bucket.AbortMultipartUpload(imur, abortOptions...) + return err + } + return nil +} + +// ----- concurrent upload with checkpoint ----- +const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62" + +type uploadCheckpoint struct { + Magic string // Magic + MD5 string // Checkpoint file content's MD5 + FilePath string // Local file path + FileStat cpStat // File state + ObjectKey string // Key + UploadID string // Upload ID + Parts []cpPart // All parts of the local file +} + +type cpStat struct { + Size int64 // File size + LastModified time.Time // File's last modified time + MD5 string // Local file's MD5 +} + +type cpPart struct { + Chunk FileChunk // File chunk + Part UploadPart // Uploaded part + IsCompleted bool // Upload complete flag +} + +// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid. +func (cp uploadCheckpoint) isValid(filePath string) (bool, error) { + // Compare the CP's magic number and MD5. + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != uploadCpMagic || b64 != cp.MD5 { + return false, nil + } + + // Make sure if the local file is updated. 
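+    // The check has two stages: the checkpoint JSON above must re-hash to the stored MD5 (so the
+    // checkpoint file itself was not modified), and the local file's size, last-modified time and
+    // MD5 below must still match what was recorded when the upload started.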
+ fd, err := os.Open(filePath) + if err != nil { + return false, err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return false, err + } + + md, err := calcFileMD5(filePath) + if err != nil { + return false, err + } + + // Compare the file size, file's last modified time and file's MD5 + if cp.FileStat.Size != st.Size() || + !cp.FileStat.LastModified.Equal(st.ModTime()) || + cp.FileStat.MD5 != md { + return false, nil + } + + return true, nil +} + +// load loads from the file +func (cp *uploadCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump dumps to the local file +func (cp *uploadCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialization + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// updatePart updates the part status +func (cp *uploadCheckpoint) updatePart(part UploadPart) { + cp.Parts[part.PartNumber-1].Part = part + cp.Parts[part.PartNumber-1].IsCompleted = true +} + +// todoParts returns unfinished parts +func (cp *uploadCheckpoint) todoParts() []FileChunk { + fcs := []FileChunk{} + for _, part := range cp.Parts { + if !part.IsCompleted { + fcs = append(fcs, part.Chunk) + } + } + return fcs +} + +// allParts returns all parts +func (cp *uploadCheckpoint) allParts() []UploadPart { + ps := []UploadPart{} + for _, part := range cp.Parts { + ps = append(ps, part.Part) + } + return ps +} + +// getCompletedBytes returns completed bytes count +func (cp *uploadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for _, part := range cp.Parts { + if part.IsCompleted { + completedBytes += part.Chunk.Size + } + } + return completedBytes +} + +// calcFileMD5 calculates the MD5 for the specified local file +func calcFileMD5(filePath string) (string, error) { + return "", nil +} + +// prepare initializes the multipart upload +func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error { + // CP + cp.Magic = uploadCpMagic + cp.FilePath = filePath + cp.ObjectKey = objectKey + + // Local file + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return err + } + cp.FileStat.Size = st.Size() + cp.FileStat.LastModified = st.ModTime() + md, err := calcFileMD5(filePath) + if err != nil { + return err + } + cp.FileStat.MD5 = md + + // Chunks + parts, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + cp.Parts = make([]cpPart, len(parts)) + for i, part := range parts { + cp.Parts[i].Chunk = part + cp.Parts[i].IsCompleted = false + } + + // Init load + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) + if err != nil { + return err + } + cp.UploadID = imur.UploadID + + return nil +} + +// complete completes the multipart upload and deletes the local CP files +func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { + imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName, + Key: cp.ObjectKey, UploadID: cp.UploadID} + _, err := bucket.CompleteMultipartUpload(imur, parts, options...) 
+ if err != nil { + return err + } + os.Remove(cpFilePath) + return err +} + +// uploadFileWithCp handles concurrent upload with checkpoint +func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error { + listener := GetProgressListener(options) + + partOptions := ChoiceTransferPartOption(options) + completeOptions := ChoiceCompletePartOption(options) + + // Load CP data + ucp := uploadCheckpoint{} + err := ucp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Load error or the CP data is invalid. + valid, err := ucp.isValid(filePath) + if err != nil || !valid { + if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil { + return err + } + os.Remove(cpFilePath) + } + + chunks := ucp.todoParts() + imur := InitiateMultipartUploadResult{ + Bucket: bucket.BucketName, + Key: objectKey, + UploadID: ucp.UploadID} + + jobs := make(chan FileChunk, len(chunks)) + results := make(chan UploadPart, len(chunks)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := ucp.getCompletedBytes() + + // why RwBytes in ProgressEvent is 0 ? + // because read or write event has been notified in teeReader.Read() + event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0) + publishProgress(listener, event) + + // Start the workers + arg := workerArg{&bucket, filePath, imur, partOptions, uploadPartHooker} + for w := 1; w <= routines; w++ { + go worker(w, arg, jobs, results, failed, die) + } + + // Schedule jobs + go scheduler(jobs, chunks) + + // Waiting for the job finished + completed := 0 + for completed < len(chunks) { + select { + case part := <-results: + completed++ + ucp.updatePart(part) + ucp.dump(cpFilePath) + completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size + event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, ucp.Parts[part.PartNumber-1].Chunk.Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(chunks) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0) + publishProgress(listener, event) + + // Complete the multipart upload + err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, completeOptions) + return err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go new file mode 100644 index 000000000000..78b7913006ac --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go @@ -0,0 +1,522 @@ +package oss + +import ( + "bytes" + "errors" + "fmt" + "hash/crc32" + "hash/crc64" + "io" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "time" +) + +var sys_name string +var sys_release string +var sys_machine string + +func init() { + sys_name = runtime.GOOS + sys_release = "-" + sys_machine = runtime.GOARCH + + if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil { + sys_name = string(bytes.TrimSpace(out)) + } + if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil { + sys_release = string(bytes.TrimSpace(out)) + } + if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil { + sys_machine = string(bytes.TrimSpace(out)) + } +} + +// userAgent gets user agent +// It has the SDK version information, 
OS information and GO version +func userAgent() string { + sys := getSysInfo() + return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name, + sys.release, sys.machine, runtime.Version()) +} + +type sysInfo struct { + name string // OS name such as windows/Linux + release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc + machine string // CPU type amd64/x86_64 +} + +// getSysInfo gets system info +// gets the OS information and CPU type +func getSysInfo() sysInfo { + return sysInfo{name: sys_name, release: sys_release, machine: sys_machine} +} + +// GetRangeConfig gets the download range from the options. +func GetRangeConfig(options []Option) (*UnpackedRange, error) { + rangeOpt, err := FindOption(options, HTTPHeaderRange, nil) + if err != nil || rangeOpt == nil { + return nil, err + } + return ParseRange(rangeOpt.(string)) +} + +// UnpackedRange +type UnpackedRange struct { + HasStart bool // Flag indicates if the start point is specified + HasEnd bool // Flag indicates if the end point is specified + Start int64 // Start point + End int64 // End point +} + +// InvalidRangeError returns invalid range error +func InvalidRangeError(r string) error { + return fmt.Errorf("InvalidRange %s", r) +} + +func GetRangeString(unpackRange UnpackedRange) string { + var strRange string + if unpackRange.HasStart && unpackRange.HasEnd { + strRange = fmt.Sprintf("%d-%d", unpackRange.Start, unpackRange.End) + } else if unpackRange.HasStart { + strRange = fmt.Sprintf("%d-", unpackRange.Start) + } else if unpackRange.HasEnd { + strRange = fmt.Sprintf("-%d", unpackRange.End) + } + return strRange +} + +// ParseRange parse various styles of range such as bytes=M-N +func ParseRange(normalizedRange string) (*UnpackedRange, error) { + var err error + hasStart := false + hasEnd := false + var start int64 + var end int64 + + // Bytes==M-N or ranges=M-N + nrSlice := strings.Split(normalizedRange, "=") + if len(nrSlice) != 2 || nrSlice[0] != "bytes" { + return nil, InvalidRangeError(normalizedRange) + } + + // Bytes=M-N,X-Y + rSlice := strings.Split(nrSlice[1], ",") + rStr := rSlice[0] + + if strings.HasSuffix(rStr, "-") { // M- + startStr := rStr[:len(rStr)-1] + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + return nil, InvalidRangeError(normalizedRange) + } + hasStart = true + } else if strings.HasPrefix(rStr, "-") { // -N + len := rStr[1:] + end, err = strconv.ParseInt(len, 10, 64) + if err != nil { + return nil, InvalidRangeError(normalizedRange) + } + if end == 0 { // -0 + return nil, InvalidRangeError(normalizedRange) + } + hasEnd = true + } else { // M-N + valSlice := strings.Split(rStr, "-") + if len(valSlice) != 2 { + return nil, InvalidRangeError(normalizedRange) + } + start, err = strconv.ParseInt(valSlice[0], 10, 64) + if err != nil { + return nil, InvalidRangeError(normalizedRange) + } + hasStart = true + end, err = strconv.ParseInt(valSlice[1], 10, 64) + if err != nil { + return nil, InvalidRangeError(normalizedRange) + } + hasEnd = true + } + + return &UnpackedRange{hasStart, hasEnd, start, end}, nil +} + +// AdjustRange returns adjusted range, adjust the range according to the length of the file +func AdjustRange(ur *UnpackedRange, size int64) (start, end int64) { + if ur == nil { + return 0, size + } + + if ur.HasStart && ur.HasEnd { + start = ur.Start + end = ur.End + 1 + if ur.Start < 0 || ur.Start >= size || ur.End > size || ur.Start > ur.End { + start = 0 + end = size + } + } else if ur.HasStart { + start = ur.Start + end = size + if ur.Start < 0 || 
ur.Start >= size { + start = 0 + } + } else if ur.HasEnd { + start = size - ur.End + end = size + if ur.End < 0 || ur.End > size { + start = 0 + end = size + } + } + return +} + +// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. +// gets the current time in Unix time, in seconds. +func GetNowSec() int64 { + return time.Now().Unix() +} + +// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. The result is undefined if the Unix time +// in nanoseconds cannot be represented by an int64. Note that this +// means the result of calling UnixNano on the zero Time is undefined. +// gets the current time in Unix time, in nanoseconds. +func GetNowNanoSec() int64 { + return time.Now().UnixNano() +} + +// GetNowGMT gets the current time in GMT format. +func GetNowGMT() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +// FileChunk is the file chunk definition +type FileChunk struct { + Number int // Chunk number + Offset int64 // Chunk offset + Size int64 // Chunk size. +} + +// SplitFileByPartNum splits big file into parts by the num of parts. +// Split the file with specified parts count, returns the split result when error is nil. +func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) { + if chunkNum <= 0 || chunkNum > 10000 { + return nil, errors.New("chunkNum invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + + if int64(chunkNum) > stat.Size() { + return nil, errors.New("oss: chunkNum invalid") + } + + var chunks []FileChunk + var chunk = FileChunk{} + var chunkN = (int64)(chunkNum) + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * (stat.Size() / chunkN) + if i == chunkN-1 { + chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN + } else { + chunk.Size = stat.Size() / chunkN + } + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// SplitFileByPartSize splits big file into parts by the size of parts. +// Splits the file by the part size. Returns the FileChunk when error is nil. 
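Before SplitFileByPartSize itself (which follows immediately below), a rough sketch of how the exported range helpers above fit together; the byte values and object size are arbitrary, and the snippet only exercises the pure parse/clamp functions shown in this file:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// "bytes=100-499" carries both endpoints, so HasStart and HasEnd are true.
	ur, err := oss.ParseRange("bytes=100-499")
	if err != nil {
		panic(err)
	}

	// AdjustRange clamps the parsed range against the object size and
	// returns a half-open [start, end) pair: here 100 and 500.
	start, end := oss.AdjustRange(ur, 1024)
	fmt.Println(start, end)

	// GetRangeString renders the struct back to the "100-499" form.
	fmt.Println(oss.GetRangeString(*ur))
}
```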
+func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) { + if chunkSize <= 0 { + return nil, errors.New("chunkSize invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + var chunkN = stat.Size() / chunkSize + if chunkN >= 10000 { + return nil, errors.New("Too many parts, please increase part size") + } + + var chunks []FileChunk + var chunk = FileChunk{} + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * chunkSize + chunk.Size = chunkSize + chunks = append(chunks, chunk) + } + + if stat.Size()%chunkSize > 0 { + chunk.Number = len(chunks) + 1 + chunk.Offset = int64(len(chunks)) * chunkSize + chunk.Size = stat.Size() % chunkSize + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// GetPartEnd calculates the end position +func GetPartEnd(begin int64, total int64, per int64) int64 { + if begin+per > total { + return total - 1 + } + return begin + per - 1 +} + +// CrcTable returns the table constructed from the specified polynomial +var CrcTable = func() *crc64.Table { + return crc64.MakeTable(crc64.ECMA) +} + +// CrcTable returns the table constructed from the specified polynomial +var crc32Table = func() *crc32.Table { + return crc32.MakeTable(crc32.IEEE) +} + +// choiceTransferPartOption choices valid option supported by Uploadpart or DownloadPart +func ChoiceTransferPartOption(options []Option) []Option { + var outOption []Option + + listener, _ := FindOption(options, progressListener, nil) + if listener != nil { + outOption = append(outOption, Progress(listener.(ProgressListener))) + } + + payer, _ := FindOption(options, HTTPHeaderOssRequester, nil) + if payer != nil { + outOption = append(outOption, RequestPayer(PayerType(payer.(string)))) + } + + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + outOption = append(outOption, VersionId(versionId.(string))) + } + + trafficLimit, _ := FindOption(options, HTTPHeaderOssTrafficLimit, nil) + if trafficLimit != nil { + speed, _ := strconv.ParseInt(trafficLimit.(string), 10, 64) + outOption = append(outOption, TrafficLimitHeader(speed)) + } + + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header))) + } + + return outOption +} + +// ChoiceCompletePartOption choices valid option supported by CompleteMulitiPart +func ChoiceCompletePartOption(options []Option) []Option { + var outOption []Option + + listener, _ := FindOption(options, progressListener, nil) + if listener != nil { + outOption = append(outOption, Progress(listener.(ProgressListener))) + } + + payer, _ := FindOption(options, HTTPHeaderOssRequester, nil) + if payer != nil { + outOption = append(outOption, RequestPayer(PayerType(payer.(string)))) + } + + acl, _ := FindOption(options, HTTPHeaderOssObjectACL, nil) + if acl != nil { + outOption = append(outOption, ObjectACL(ACLType(acl.(string)))) + } + + callback, _ := FindOption(options, HTTPHeaderOssCallback, nil) + if callback != nil { + outOption = append(outOption, Callback(callback.(string))) + } + + callbackVar, _ := FindOption(options, HTTPHeaderOssCallbackVar, nil) + if callbackVar != nil { + outOption = append(outOption, CallbackVar(callbackVar.(string))) + } + + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + outOption = append(outOption, 
GetResponseHeader(respHeader.(*http.Header))) + } + + forbidOverWrite, _ := FindOption(options, HTTPHeaderOssForbidOverWrite, nil) + if forbidOverWrite != nil { + if forbidOverWrite.(string) == "true" { + outOption = append(outOption, ForbidOverWrite(true)) + } else { + outOption = append(outOption, ForbidOverWrite(false)) + } + } + + return outOption +} + +// ChoiceAbortPartOption choices valid option supported by AbortMultipartUpload +func ChoiceAbortPartOption(options []Option) []Option { + var outOption []Option + payer, _ := FindOption(options, HTTPHeaderOssRequester, nil) + if payer != nil { + outOption = append(outOption, RequestPayer(PayerType(payer.(string)))) + } + + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header))) + } + + return outOption +} + +// ChoiceHeadObjectOption choices valid option supported by HeadObject +func ChoiceHeadObjectOption(options []Option) []Option { + var outOption []Option + + // not select HTTPHeaderRange to get whole object length + payer, _ := FindOption(options, HTTPHeaderOssRequester, nil) + if payer != nil { + outOption = append(outOption, RequestPayer(PayerType(payer.(string)))) + } + + versionId, _ := FindOption(options, "versionId", nil) + if versionId != nil { + outOption = append(outOption, VersionId(versionId.(string))) + } + + respHeader, _ := FindOption(options, responseHeader, nil) + if respHeader != nil { + outOption = append(outOption, GetResponseHeader(respHeader.(*http.Header))) + } + + return outOption +} + +func CheckBucketName(bucketName string) error { + nameLen := len(bucketName) + if nameLen < 3 || nameLen > 63 { + return fmt.Errorf("bucket name %s len is between [3-63],now is %d", bucketName, nameLen) + } + + for _, v := range bucketName { + if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') { + return fmt.Errorf("bucket name %s can only include lowercase letters, numbers, and -", bucketName) + } + } + if bucketName[0] == '-' || bucketName[nameLen-1] == '-' { + return fmt.Errorf("bucket name %s must start and end with a lowercase letter or number", bucketName) + } + return nil +} + +func GetReaderLen(reader io.Reader) (int64, error) { + var contentLength int64 + var err error + switch v := reader.(type) { + case *bytes.Buffer: + contentLength = int64(v.Len()) + case *bytes.Reader: + contentLength = int64(v.Len()) + case *strings.Reader: + contentLength = int64(v.Len()) + case *os.File: + fInfo, fError := v.Stat() + if fError != nil { + err = fmt.Errorf("can't get reader content length,%s", fError.Error()) + } else { + contentLength = fInfo.Size() + } + case *io.LimitedReader: + contentLength = int64(v.N) + case *LimitedReadCloser: + contentLength = int64(v.N) + default: + err = fmt.Errorf("can't get reader content length,unkown reader type") + } + return contentLength, err +} + +func LimitReadCloser(r io.Reader, n int64) io.Reader { + var lc LimitedReadCloser + lc.R = r + lc.N = n + return &lc +} + +// LimitedRC support Close() +type LimitedReadCloser struct { + io.LimitedReader +} + +func (lc *LimitedReadCloser) Close() error { + if closer, ok := lc.R.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +type DiscardReadCloser struct { + RC io.ReadCloser + Discard int +} + +func (drc *DiscardReadCloser) Read(b []byte) (int, error) { + n, err := drc.RC.Read(b) + if drc.Discard == 0 || n <= 0 { + return n, err + } + + if n <= drc.Discard { + drc.Discard -= n + return 0, err + } + + realLen 
:= n - drc.Discard + copy(b[0:realLen], b[drc.Discard:n]) + drc.Discard = 0 + return realLen, err +} + +func (drc *DiscardReadCloser) Close() error { + closer, ok := drc.RC.(io.ReadCloser) + if ok { + return closer.Close() + } + return nil +} + +func ConvertEmptyValueToNil(params map[string]interface{}, keys []string) { + for _, key := range keys { + value, ok := params[key] + if ok && value == "" { + // convert "" to nil + params[key] = nil + } + } +} diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index 803ba787d0da..812828be77f9 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,26 @@ # Change Log +## [v1.95.0] - 2023-01-23 + +- #595 - @dweinshenker - Add UpgradeMajorVersion to godo + +## [v1.94.0] - 2022-01-23 + +- #596 - @DMW2151 - DBAAS-3906: Include updatePool for DB Clusters +- #593 - @danaelhe - Add Uptime Checks and Alerts Support + +## [v1.93.0] - 2022-12-15 + +- #591 - @andrewsomething - tokens: Add initial support for new API. + +## [v1.92.0] - 2022-12-14 + +- #589 - @wez470 - load-balancers: Minor doc fixup +- #585 - @StephenVarela - Add firewall support for load balancers +- #587 - @StephenVarela - Support new http alerts for load balancers +- #586 - @andrewsomething - godo.go: Sort service lists. +- #583 - @ddebarros - Adds support for functions trigger API + ## [v1.91.1] - 2022-11-23 - #582 - @StephenVarela - Load Balancers: Support new endpoints for http alerts diff --git a/vendor/github.com/digitalocean/godo/databases.go b/vendor/github.com/digitalocean/godo/databases.go index 69bd6c9f52d1..c70e96561774 100644 --- a/vendor/github.com/digitalocean/godo/databases.go +++ b/vendor/github.com/digitalocean/godo/databases.go @@ -9,27 +9,28 @@ import ( ) const ( - databaseBasePath = "/v2/databases" - databaseSinglePath = databaseBasePath + "/%s" - databaseCAPath = databaseBasePath + "/%s/ca" - databaseConfigPath = databaseBasePath + "/%s/config" - databaseResizePath = databaseBasePath + "/%s/resize" - databaseMigratePath = databaseBasePath + "/%s/migrate" - databaseMaintenancePath = databaseBasePath + "/%s/maintenance" - databaseBackupsPath = databaseBasePath + "/%s/backups" - databaseUsersPath = databaseBasePath + "/%s/users" - databaseUserPath = databaseBasePath + "/%s/users/%s" - databaseResetUserAuthPath = databaseUserPath + "/reset_auth" - databaseDBPath = databaseBasePath + "/%s/dbs/%s" - databaseDBsPath = databaseBasePath + "/%s/dbs" - databasePoolPath = databaseBasePath + "/%s/pools/%s" - databasePoolsPath = databaseBasePath + "/%s/pools" - databaseReplicaPath = databaseBasePath + "/%s/replicas/%s" - databaseReplicasPath = databaseBasePath + "/%s/replicas" - databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy" - databaseSQLModePath = databaseBasePath + "/%s/sql_mode" - databaseFirewallRulesPath = databaseBasePath + "/%s/firewall" - databaseOptionsPath = databaseBasePath + "/options" + databaseBasePath = "/v2/databases" + databaseSinglePath = databaseBasePath + "/%s" + databaseCAPath = databaseBasePath + "/%s/ca" + databaseConfigPath = databaseBasePath + "/%s/config" + databaseResizePath = databaseBasePath + "/%s/resize" + databaseMigratePath = databaseBasePath + "/%s/migrate" + databaseMaintenancePath = databaseBasePath + "/%s/maintenance" + databaseBackupsPath = databaseBasePath + "/%s/backups" + databaseUsersPath = databaseBasePath + "/%s/users" + databaseUserPath = databaseBasePath + "/%s/users/%s" + 
databaseResetUserAuthPath = databaseUserPath + "/reset_auth" + databaseDBPath = databaseBasePath + "/%s/dbs/%s" + databaseDBsPath = databaseBasePath + "/%s/dbs" + databasePoolPath = databaseBasePath + "/%s/pools/%s" + databasePoolsPath = databaseBasePath + "/%s/pools" + databaseReplicaPath = databaseBasePath + "/%s/replicas/%s" + databaseReplicasPath = databaseBasePath + "/%s/replicas" + databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy" + databaseSQLModePath = databaseBasePath + "/%s/sql_mode" + databaseFirewallRulesPath = databaseBasePath + "/%s/firewall" + databaseOptionsPath = databaseBasePath + "/options" + databaseUpgradeMajorVersionPath = databaseBasePath + "/%s/upgrade" ) // SQL Mode constants allow for MySQL-specific SQL flavor configuration. @@ -124,6 +125,7 @@ type DatabasesService interface { CreatePool(context.Context, string, *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error) GetPool(context.Context, string, string) (*DatabasePool, *Response, error) DeletePool(context.Context, string, string) (*Response, error) + UpdatePool(context.Context, string, string, *DatabaseUpdatePoolRequest) (*Response, error) GetReplica(context.Context, string, string) (*DatabaseReplica, *Response, error) ListReplicas(context.Context, string, *ListOptions) ([]DatabaseReplica, *Response, error) CreateReplica(context.Context, string, *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error) @@ -141,6 +143,7 @@ type DatabasesService interface { UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error) UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error) ListOptions(todo context.Context) (*DatabaseOptions, *Response, error) + UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error) } // DatabasesServiceOp handles communication with the Databases related methods @@ -299,6 +302,14 @@ type DatabaseCreatePoolRequest struct { Mode string `json:"mode"` } +// DatabaseUpdatePoolRequest is used to update a database connection pool +type DatabaseUpdatePoolRequest struct { + User string `json:"user,omitempty"` + Size int `json:"size"` + Database string `json:"db"` + Mode string `json:"mode"` +} + // DatabaseCreateUserRequest is used to create a new database user type DatabaseCreateUserRequest struct { Name string `json:"name"` @@ -521,6 +532,10 @@ type evictionPolicyRoot struct { EvictionPolicy string `json:"eviction_policy"` } +type UpgradeVersionRequest struct { + Version string `json:"version"` +} + type sqlModeRoot struct { SQLMode string `json:"sql_mode"` } @@ -904,6 +919,37 @@ func (svc *DatabasesServiceOp) DeletePool(ctx context.Context, databaseID, name return resp, nil } +// UpdatePool will update an existing database connection pool +func (svc *DatabasesServiceOp) UpdatePool(ctx context.Context, databaseID, name string, updatePool *DatabaseUpdatePoolRequest) (*Response, error) { + path := fmt.Sprintf(databasePoolPath, databaseID, name) + + if updatePool == nil { + return nil, NewArgError("updatePool", "cannot be nil") + } + + if updatePool.Mode == "" { + return nil, NewArgError("mode", "cannot be empty") + } + + if updatePool.Database == "" { + return nil, NewArgError("database", "cannot be empty") + } + + if updatePool.Size < 1 { + return nil, NewArgError("size", "cannot be less than 1") + } + + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updatePool) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + 
return resp, nil +} + // GetReplica returns a single database replica func (svc *DatabasesServiceOp) GetReplica(ctx context.Context, databaseID, name string) (*DatabaseReplica, *Response, error) { path := fmt.Sprintf(databaseReplicaPath, databaseID, name) @@ -1179,3 +1225,19 @@ func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOption return root.Options, resp, nil } + +// UpgradeMajorVersion upgrades the major version of a cluster. +func (svc *DatabasesServiceOp) UpgradeMajorVersion(ctx context.Context, databaseID string, upgradeReq *UpgradeVersionRequest) (*Response, error) { + path := fmt.Sprintf(databaseUpgradeMajorVersionPath, databaseID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, upgradeReq) + if err != nil { + return nil, err + } + + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/functions.go b/vendor/github.com/digitalocean/godo/functions.go index b3829367a0aa..61c80778af25 100644 --- a/vendor/github.com/digitalocean/godo/functions.go +++ b/vendor/github.com/digitalocean/godo/functions.go @@ -8,8 +8,9 @@ import ( ) const ( - functionsBasePath = "/v2/functions/namespaces" - functionsNamespacePath = functionsBasePath + "/%s" + functionsBasePath = "/v2/functions/namespaces" + functionsNamespacePath = functionsBasePath + "/%s" + functionsTriggerBasePath = functionsNamespacePath + "/triggers" ) type FunctionsService interface { @@ -17,6 +18,12 @@ type FunctionsService interface { GetNamespace(context.Context, string) (*FunctionsNamespace, *Response, error) CreateNamespace(context.Context, *FunctionsNamespaceCreateRequest) (*FunctionsNamespace, *Response, error) DeleteNamespace(context.Context, string) (*Response, error) + + ListTriggers(context.Context, string) ([]FunctionsTrigger, *Response, error) + GetTrigger(context.Context, string, string) (*FunctionsTrigger, *Response, error) + CreateTrigger(context.Context, string, *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error) + UpdateTrigger(context.Context, string, string, *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error) + DeleteTrigger(context.Context, string, string) (*Response, error) } type FunctionsServiceOp struct { @@ -49,6 +56,49 @@ type FunctionsNamespaceCreateRequest struct { Region string `json:"region"` } +type triggersRoot struct { + Triggers []FunctionsTrigger `json:"triggers,omitempty"` +} + +type triggerRoot struct { + Trigger *FunctionsTrigger `json:"trigger,omitempty"` +} + +type FunctionsTrigger struct { + Namespace string `json:"namespace,omitempty"` + Function string `json:"function,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + IsEnabled bool `json:"is_enabled"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"` + ScheduledRuns *TriggerScheduledRuns `json:"scheduled_runs,omitempty"` +} + +type TriggerScheduledDetails struct { + Cron string `json:"cron,omitempty"` + Body map[string]interface{} `json:"body,omitempty"` +} + +type TriggerScheduledRuns struct { + LastRunAt time.Time `json:"last_run_at,omitempty"` + NextRunAt time.Time `json:"next_run_at,omitempty"` +} + +type FunctionsTriggerCreateRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Function string `json:"function"` + IsEnabled bool `json:"is_enabled"` + ScheduledDetails 
*TriggerScheduledDetails `json:"scheduled_details,omitempty"` +} + +type FunctionsTriggerUpdateRequest struct { + IsEnabled *bool `json:"is_enabled,omitempty"` + ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"` +} + // Gets a list of namespaces func (s *FunctionsServiceOp) ListNamespaces(ctx context.Context) ([]FunctionsNamespace, *Response, error) { req, err := s.client.NewRequest(ctx, http.MethodGet, functionsBasePath, nil) @@ -108,3 +158,79 @@ func (s *FunctionsServiceOp) DeleteNamespace(ctx context.Context, namespace stri } return resp, nil } + +// ListTriggers gets a list of triggers +func (s *FunctionsServiceOp) ListTriggers(ctx context.Context, namespace string) ([]FunctionsTrigger, *Response, error) { + path := fmt.Sprintf(functionsTriggerBasePath, namespace) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(triggersRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Triggers, resp, nil +} + +// GetTrigger gets a single trigger +func (s *FunctionsServiceOp) GetTrigger(ctx context.Context, namespace string, trigger string) (*FunctionsTrigger, *Response, error) { + path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(triggerRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Trigger, resp, nil +} + +// CreateTrigger creates a trigger +func (s *FunctionsServiceOp) CreateTrigger(ctx context.Context, namespace string, opts *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error) { + path := fmt.Sprintf(functionsTriggerBasePath, namespace) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts) + if err != nil { + return nil, nil, err + } + root := new(triggerRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Trigger, resp, nil +} + +// UpdateTrigger updates a trigger +func (s *FunctionsServiceOp) UpdateTrigger(ctx context.Context, namespace string, trigger string, opts *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error) { + path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) + req, err := s.client.NewRequest(ctx, http.MethodPut, path, opts) + + if err != nil { + return nil, nil, err + } + root := new(triggerRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Trigger, resp, nil +} + +// DeleteTrigger deletes a trigger +func (s *FunctionsServiceOp) DeleteTrigger(ctx context.Context, namespace string, trigger string) (*Response, error) { + path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + + if err != nil { + return nil, err + } + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index 7c15e92ff4f9..b226c220aacf 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -21,7 +21,7 @@ import ( ) const ( - libraryVersion = "1.91.1" + libraryVersion = "1.95.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = 
"application/json" @@ -54,34 +54,36 @@ type Client struct { Balance BalanceService BillingHistory BillingHistoryService CDNs CDNService + Certificates CertificatesService + Databases DatabasesService Domains DomainsService Droplets DropletsService DropletActions DropletActionsService + Firewalls FirewallsService + FloatingIPs FloatingIPsService + FloatingIPActions FloatingIPActionsService + Functions FunctionsService Images ImagesService ImageActions ImageActionsService Invoices InvoicesService Keys KeysService + Kubernetes KubernetesService + LoadBalancers LoadBalancersService + Monitoring MonitoringService + OneClick OneClickService + Projects ProjectsService Regions RegionsService - Sizes SizesService - FloatingIPs FloatingIPsService - FloatingIPActions FloatingIPActionsService + Registry RegistryService ReservedIPs ReservedIPsService ReservedIPActions ReservedIPActionsService + Sizes SizesService Snapshots SnapshotsService Storage StorageService StorageActions StorageActionsService Tags TagsService - LoadBalancers LoadBalancersService - Certificates CertificatesService - Firewalls FirewallsService - Projects ProjectsService - Kubernetes KubernetesService - Registry RegistryService - Databases DatabasesService + Tokens TokensService + UptimeChecks UptimeChecksService VPCs VPCsService - OneClick OneClickService - Monitoring MonitoringService - Functions FunctionsService // Optional function called after every successful request made to the DO APIs onRequestCompleted RequestCompletionCallback @@ -216,6 +218,7 @@ func NewClient(httpClient *http.Client) *Client { baseURL, _ := url.Parse(defaultBaseURL) c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent} + c.Account = &AccountServiceOp{client: c} c.Actions = &ActionsServiceOp{client: c} c.Apps = &AppsServiceOp{client: c} @@ -223,33 +226,36 @@ func NewClient(httpClient *http.Client) *Client { c.BillingHistory = &BillingHistoryServiceOp{client: c} c.CDNs = &CDNServiceOp{client: c} c.Certificates = &CertificatesServiceOp{client: c} + c.Databases = &DatabasesServiceOp{client: c} c.Domains = &DomainsServiceOp{client: c} c.Droplets = &DropletsServiceOp{client: c} c.DropletActions = &DropletActionsServiceOp{client: c} c.Firewalls = &FirewallsServiceOp{client: c} c.FloatingIPs = &FloatingIPsServiceOp{client: c} c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} - c.ReservedIPs = &ReservedIPsServiceOp{client: c} - c.ReservedIPActions = &ReservedIPActionsServiceOp{client: c} + c.Functions = &FunctionsServiceOp{client: c} c.Images = &ImagesServiceOp{client: c} c.ImageActions = &ImageActionsServiceOp{client: c} c.Invoices = &InvoicesServiceOp{client: c} c.Keys = &KeysServiceOp{client: c} + c.Kubernetes = &KubernetesServiceOp{client: c} c.LoadBalancers = &LoadBalancersServiceOp{client: c} + c.Monitoring = &MonitoringServiceOp{client: c} + c.OneClick = &OneClickServiceOp{client: c} c.Projects = &ProjectsServiceOp{client: c} c.Regions = &RegionsServiceOp{client: c} + c.Registry = &RegistryServiceOp{client: c} + c.ReservedIPs = &ReservedIPsServiceOp{client: c} + c.ReservedIPActions = &ReservedIPActionsServiceOp{client: c} c.Sizes = &SizesServiceOp{client: c} c.Snapshots = &SnapshotsServiceOp{client: c} c.Storage = &StorageServiceOp{client: c} c.StorageActions = &StorageActionsServiceOp{client: c} c.Tags = &TagsServiceOp{client: c} - c.Kubernetes = &KubernetesServiceOp{client: c} - c.Registry = &RegistryServiceOp{client: c} - c.Databases = &DatabasesServiceOp{client: c} + c.Tokens = &TokensServiceOp{client: c} + c.UptimeChecks 
= &UptimeChecksServiceOp{client: c} c.VPCs = &VPCsServiceOp{client: c} - c.OneClick = &OneClickServiceOp{client: c} - c.Monitoring = &MonitoringServiceOp{client: c} - c.Functions = &FunctionsServiceOp{client: c} + c.headers = make(map[string]string) return c diff --git a/vendor/github.com/digitalocean/godo/keys.go b/vendor/github.com/digitalocean/godo/keys.go index d50946c23355..cd0bd29d5bd4 100644 --- a/vendor/github.com/digitalocean/godo/keys.go +++ b/vendor/github.com/digitalocean/godo/keys.go @@ -8,7 +8,7 @@ import ( const keysBasePath = "v2/account/keys" -// KeysService is an interface for interfacing with the keys +// KeysService is an interface for interfacing with the SSH keys // endpoints of the DigitalOcean API // See: https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys type KeysService interface { @@ -22,7 +22,7 @@ type KeysService interface { DeleteByFingerprint(context.Context, string) (*Response, error) } -// KeysServiceOp handles communication with key related method of the +// KeysServiceOp handles communication with SSH key related method of the // DigitalOcean API. type KeysServiceOp struct { client *Client @@ -38,7 +38,7 @@ type Key struct { PublicKey string `json:"public_key,omitempty"` } -// KeyUpdateRequest represents a request to update a DigitalOcean key. +// KeyUpdateRequest represents a request to update an SSH key stored in a DigitalOcean account. type KeyUpdateRequest struct { Name string `json:"name"` } @@ -57,13 +57,13 @@ func (s Key) String() string { return Stringify(s) } -// KeyCreateRequest represents a request to create a new key. +// KeyCreateRequest represents a request to create a new SSH key. type KeyCreateRequest struct { Name string `json:"name"` PublicKey string `json:"public_key"` } -// List all keys +// List all SSH keys func (s *KeysServiceOp) List(ctx context.Context, opt *ListOptions) ([]Key, *Response, error) { path := keysBasePath path, err := addOptions(path, opt) @@ -107,7 +107,7 @@ func (s *KeysServiceOp) get(ctx context.Context, path string) (*Key, *Response, return root.SSHKey, resp, err } -// GetByID gets a Key by id +// GetByID gets an SSH key by its ID func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, *Response, error) { if keyID < 1 { return nil, nil, NewArgError("keyID", "cannot be less than 1") @@ -117,7 +117,7 @@ func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, *Response return s.get(ctx, path) } -// GetByFingerprint gets a Key by fingerprint +// GetByFingerprint gets an SSH key by its fingerprint func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string) (*Key, *Response, error) { if len(fingerprint) < 1 { return nil, nil, NewArgError("fingerprint", "cannot not be empty") @@ -127,7 +127,7 @@ func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string return s.get(ctx, path) } -// Create a key using a KeyCreateRequest +// Create an SSH key using a KeyCreateRequest func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequest) (*Key, *Response, error) { if createRequest == nil { return nil, nil, NewArgError("createRequest", "cannot be nil") @@ -147,7 +147,7 @@ func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequ return root.SSHKey, resp, err } -// UpdateByID updates a key name by ID. +// UpdateByID updates an SSH key name by ID. 
func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { if keyID < 1 { return nil, nil, NewArgError("keyID", "cannot be less than 1") @@ -172,7 +172,7 @@ func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest return root.SSHKey, resp, err } -// UpdateByFingerprint updates a key name by fingerprint. +// UpdateByFingerprint updates an SSH key name by fingerprint. func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { if len(fingerprint) < 1 { return nil, nil, NewArgError("fingerprint", "cannot be empty") @@ -197,7 +197,7 @@ func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint str return root.SSHKey, resp, err } -// Delete key using a path +// Delete an SSH key using a path func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, error) { req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) if err != nil { @@ -209,7 +209,7 @@ func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, err return resp, err } -// DeleteByID deletes a key by its id +// DeleteByID deletes an SSH key by its id func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, error) { if keyID < 1 { return nil, NewArgError("keyID", "cannot be less than 1") @@ -219,7 +219,7 @@ func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, e return s.delete(ctx, path) } -// DeleteByFingerprint deletes a key by its fingerprint +// DeleteByFingerprint deletes an SSH key by its fingerprint func (s *KeysServiceOp) DeleteByFingerprint(ctx context.Context, fingerprint string) (*Response, error) { if len(fingerprint) < 1 { return nil, NewArgError("fingerprint", "cannot be empty") diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go index 1466d5239702..6a9a70efe408 100644 --- a/vendor/github.com/digitalocean/godo/load_balancers.go +++ b/vendor/github.com/digitalocean/godo/load_balancers.go @@ -53,6 +53,7 @@ type LoadBalancer struct { ValidateOnly bool `json:"validate_only,omitempty"` ProjectID string `json:"project_id,omitempty"` HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` + Firewall *LBFirewall `json:"firewall,omitempty"` } // String creates a human-readable description of a LoadBalancer. @@ -103,6 +104,10 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest { r.Region = l.Region.Slug } + if l.Firewall != nil { + r.Firewall = l.Firewall.deepCopy() + } + return &r } @@ -149,6 +154,33 @@ func (s StickySessions) String() string { return Stringify(s) } +// LBFirewall holds the allow and deny rules for a loadbalancer's firewall. +// Currently, allow and deny rules support cidrs and ips. +// Please use the helper methods (IPSourceFirewall/CIDRSourceFirewall) to format the allow/deny rules. 
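A rough illustration of how a caller might populate the new load balancer firewall field (the LBFirewall type and its helper formatters are defined immediately below); the name, region, and addresses are placeholder values:

```go
package main

import (
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	// Hypothetical request showing the new Firewall field; allow/deny
	// entries are formatted with the CIDR/IP helper functions.
	req := &godo.LoadBalancerRequest{
		Name:   "example-lb",
		Region: "nyc3",
		Firewall: &godo.LBFirewall{
			Allow: []string{godo.CIDRSourceFirewall("192.0.2.0/24")},
			Deny:  []string{godo.IPSourceFirewall("203.0.113.10")},
		},
	}
	fmt.Println(req)
}
```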
+type LBFirewall struct { + Allow []string `json:"allow,omitempty"` + Deny []string `json:"deny,omitempty"` +} + +func (lbf *LBFirewall) deepCopy() *LBFirewall { + return &LBFirewall{ + Allow: append([]string(nil), lbf.Allow...), + Deny: append([]string(nil), lbf.Deny...), + } +} + +// IPSourceFirewall takes an IP (string) and returns a formatted ip source firewall rule +func IPSourceFirewall(ip string) string { return fmt.Sprintf("ip:%s", ip) } + +// CIDRSourceFirewall takes a CIDR notation IP address and prefix length string +// like "192.0.2.0/24" and returns a formatted cidr source firewall rule +func CIDRSourceFirewall(cidr string) string { return fmt.Sprintf("cidr:%s", cidr) } + +// String creates a human-readable description of an LBFirewall instance. +func (f LBFirewall) String() string { + return Stringify(f) +} + // LoadBalancerRequest represents the configuration to be applied to an existing or a new load balancer. type LoadBalancerRequest struct { Name string `json:"name,omitempty"` @@ -172,6 +204,7 @@ type LoadBalancerRequest struct { ValidateOnly bool `json:"validate_only,omitempty"` ProjectID string `json:"project_id,omitempty"` HTTPIdleTimeoutSeconds *uint64 `json:"http_idle_timeout_seconds,omitempty"` + Firewall *LBFirewall `json:"firewall,omitempty"` } // String creates a human-readable description of a LoadBalancerRequest. diff --git a/vendor/github.com/digitalocean/godo/monitoring.go b/vendor/github.com/digitalocean/godo/monitoring.go index 5934c8757c18..937bb8d916f6 100644 --- a/vendor/github.com/digitalocean/godo/monitoring.go +++ b/vendor/github.com/digitalocean/godo/monitoring.go @@ -27,16 +27,18 @@ const ( DropletFiveMinuteLoadAverage = "v1/insights/droplet/load_5" DropletFifteenMinuteLoadAverage = "v1/insights/droplet/load_15" - LoadBalancerCPUUtilizationPercent = "v1/insights/lbaas/avg_cpu_utilization_percent" - LoadBalancerConnectionUtilizationPercent = "v1/insights/lbaas/connection_utilization_percent" - LoadBalancerDropletHealth = "v1/insights/lbaas/droplet_health" - LoadBalancerTLSUtilizationPercent = "v1/insights/lbaas/tls_connections_per_second_utilization_percent" - LoadBalancerIncreaseInHTTPErrorRatePercentage = "v1/insights/lbaas/increase_in_http_error_rate_percentage" - LoadBalancerIncreaseInHTTPErrorRateCount = "v1/insights/lbaas/increase_in_http_error_rate_count" - LoadBalancerHighHttpResponseTime = "v1/insights/lbaas/high_http_request_response_time" - LoadBalancerHighHttpResponseTime50P = "v1/insights/lbaas/high_http_request_response_time_50p" - LoadBalancerHighHttpResponseTime95P = "v1/insights/lbaas/high_http_request_response_time_95p" - LoadBalancerHighHttpResponseTime99P = "v1/insights/lbaas/high_http_request_response_time_99p" + LoadBalancerCPUUtilizationPercent = "v1/insights/lbaas/avg_cpu_utilization_percent" + LoadBalancerConnectionUtilizationPercent = "v1/insights/lbaas/connection_utilization_percent" + LoadBalancerDropletHealth = "v1/insights/lbaas/droplet_health" + LoadBalancerTLSUtilizationPercent = "v1/insights/lbaas/tls_connections_per_second_utilization_percent" + LoadBalancerIncreaseInHTTPErrorRatePercentage5xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_5xx" + LoadBalancerIncreaseInHTTPErrorRatePercentage4xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_4xx" + LoadBalancerIncreaseInHTTPErrorRateCount5xx = "v1/insights/lbaas/increase_in_http_error_rate_count_5xx" + LoadBalancerIncreaseInHTTPErrorRateCount4xx = "v1/insights/lbaas/increase_in_http_error_rate_count_4xx" + LoadBalancerHighHttpResponseTime = 
"v1/insights/lbaas/high_http_request_response_time" + LoadBalancerHighHttpResponseTime50P = "v1/insights/lbaas/high_http_request_response_time_50p" + LoadBalancerHighHttpResponseTime95P = "v1/insights/lbaas/high_http_request_response_time_95p" + LoadBalancerHighHttpResponseTime99P = "v1/insights/lbaas/high_http_request_response_time_99p" DbaasFifteenMinuteLoadAverage = "v1/dbaas/alerts/load_15_alerts" DbaasMemoryUtilizationPercent = "v1/dbaas/alerts/memory_utilization_alerts" diff --git a/vendor/github.com/digitalocean/godo/tokens.go b/vendor/github.com/digitalocean/godo/tokens.go new file mode 100644 index 000000000000..13aa418dff19 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/tokens.go @@ -0,0 +1,228 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const ( + accessTokensBasePath = "v2/tokens" + tokenScopesBasePath = accessTokensBasePath + "/scopes" +) + +// TokensService is an interface for managing DigitalOcean API access tokens. +// It is not currently generally available. Follow the release notes for +// updates: https://docs.digitalocean.com/release-notes/api/ +type TokensService interface { + List(context.Context, *ListOptions) ([]Token, *Response, error) + Get(context.Context, int) (*Token, *Response, error) + Create(context.Context, *TokenCreateRequest) (*Token, *Response, error) + Update(context.Context, int, *TokenUpdateRequest) (*Token, *Response, error) + Revoke(context.Context, int) (*Response, error) + ListScopes(context.Context, *ListOptions) ([]TokenScope, *Response, error) + ListScopesByNamespace(context.Context, string, *ListOptions) ([]TokenScope, *Response, error) +} + +// TokensServiceOp handles communication with the tokens related methods of the +// DigitalOcean API. +type TokensServiceOp struct { + client *Client +} + +var _ TokensService = &TokensServiceOp{} + +// Token represents a DigitalOcean API token. +type Token struct { + ID int `json:"id"` + Name string `json:"name"` + Scopes []string `json:"scopes"` + ExpirySeconds *int `json:"expiry_seconds"` + CreatedAt time.Time `json:"created_at"` + LastUsedAt string `json:"last_used_at"` + + // AccessToken contains the actual Oauth token string. It is only included + // in the create response. + AccessToken string `json:"access_token,omitempty"` +} + +// tokenRoot represents a response from the DigitalOcean API +type tokenRoot struct { + Token *Token `json:"token"` +} + +type tokensRoot struct { + Tokens []Token `json:"tokens"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +// TokenCreateRequest represents a request to create a token. +type TokenCreateRequest struct { + Name string `json:"name"` + Scopes []string `json:"scopes"` + ExpirySeconds *int `json:"expiry_seconds,omitempty"` +} + +// TokenUpdateRequest represents a request to update a token. +type TokenUpdateRequest struct { + Name string `json:"name,omitempty"` + Scopes []string `json:"scopes,omitempty"` +} + +// TokenScope is a representation of a scope for the public API. +type TokenScope struct { + Name string `json:"name"` +} + +type tokenScopesRoot struct { + TokenScopes []TokenScope `json:"scopes"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type tokenScopeNamespaceParam struct { + Namespace string `url:"namespace,omitempty"` +} + +// List all DigitalOcean API access tokens. 
+func (c TokensServiceOp) List(ctx context.Context, opt *ListOptions) ([]Token, *Response, error) { + path, err := addOptions(accessTokensBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tokensRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.Tokens, resp, err +} + +// Get a specific DigitalOcean API access token. +func (c TokensServiceOp) Get(ctx context.Context, tokenID int) (*Token, *Response, error) { + path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) + req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tokenRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Token, resp, err +} + +// Create a new DigitalOcean API access token. +func (c TokensServiceOp) Create(ctx context.Context, createRequest *TokenCreateRequest) (*Token, *Response, error) { + req, err := c.client.NewRequest(ctx, http.MethodPost, accessTokensBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(tokenRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Token, resp, err +} + +// Update the name or scopes of a specific DigitalOcean API access token. +func (c TokensServiceOp) Update(ctx context.Context, tokenID int, updateRequest *TokenUpdateRequest) (*Token, *Response, error) { + path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) + req, err := c.client.NewRequest(ctx, http.MethodPatch, path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(tokenRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Token, resp, err +} + +// Revoke a specific DigitalOcean API access token. +func (c TokensServiceOp) Revoke(ctx context.Context, tokenID int) (*Response, error) { + path := fmt.Sprintf("%s/%d", accessTokensBasePath, tokenID) + req, err := c.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := c.client.Do(ctx, req, nil) + + return resp, err +} + +// ListScopes lists all available scopes that can be granted to a token. +func (c TokensServiceOp) ListScopes(ctx context.Context, opt *ListOptions) ([]TokenScope, *Response, error) { + path, err := addOptions(tokenScopesBasePath, opt) + if err != nil { + return nil, nil, err + } + + return listTokenScopes(ctx, c, path) +} + +// ListScopesByNamespace lists available scopes in a namespace that can be granted +// to a token (e.g. the namespace for the `droplet:read“ scope is `droplet`). 
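A minimal sketch of issuing a scoped token through the new (not yet generally available) tokens service, before the namespace-scoped scope listing that follows; the client token string, token name, and scope are placeholders, and NewFromToken is godo's existing constructor rather than something added in this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/digitalocean/godo"
)

func main() {
	// Placeholder credentials; a real caller would supply its own API token.
	client := godo.NewFromToken("dop_v1_example")

	expiry := 3600
	created, _, err := client.Tokens.Create(context.Background(), &godo.TokenCreateRequest{
		Name:          "example-token",
		Scopes:        []string{"droplet:read"},
		ExpirySeconds: &expiry,
	})
	if err != nil {
		panic(err)
	}

	// AccessToken is only included in the create response, so capture it here.
	fmt.Println(created.AccessToken)
}
```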
+func (c TokensServiceOp) ListScopesByNamespace(ctx context.Context, namespace string, opt *ListOptions) ([]TokenScope, *Response, error) { + path, err := addOptions(tokenScopesBasePath, opt) + if err != nil { + return nil, nil, err + } + + namespaceOpt := tokenScopeNamespaceParam{ + Namespace: namespace, + } + + path, err = addOptions(path, namespaceOpt) + if err != nil { + return nil, nil, err + } + + return listTokenScopes(ctx, c, path) +} + +func listTokenScopes(ctx context.Context, c TokensServiceOp, path string) ([]TokenScope, *Response, error) { + req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tokenScopesRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.TokenScopes, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/uptime.go b/vendor/github.com/digitalocean/godo/uptime.go new file mode 100644 index 000000000000..915d6c71389e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/uptime.go @@ -0,0 +1,342 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "path" +) + +const uptimeChecksBasePath = "/v2/uptime/checks" + +// UptimeChecksService is an interface for creating and managing Uptime checks with the DigitalOcean API. +// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Uptime +type UptimeChecksService interface { + List(context.Context, *ListOptions) ([]UptimeCheck, *Response, error) + Get(context.Context, string) (*UptimeCheck, *Response, error) + GetState(context.Context, string) (*UptimeCheckState, *Response, error) + Create(context.Context, *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) + Update(context.Context, string, *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) + Delete(context.Context, string) (*Response, error) + GetAlert(context.Context, string, string) (*UptimeAlert, *Response, error) + ListAlerts(context.Context, string, *ListOptions) ([]UptimeAlert, *Response, error) + CreateAlert(context.Context, string, *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) + UpdateAlert(context.Context, string, string, *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) + DeleteAlert(context.Context, string, string) (*Response, error) +} + +// UptimeChecksServiceOp handles communication with Uptime Check methods of the DigitalOcean API. +type UptimeChecksServiceOp struct { + client *Client +} + +// UptimeCheck represents a DigitalOcean UptimeCheck configuration. +type UptimeCheck struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Target string `json:"target"` + Regions []string `json:"regions"` + Enabled bool `json:"enabled"` +} + +// UptimeAlert represents a DigitalOcean Uptime Alert configuration. +type UptimeAlert struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Threshold int `json:"threshold"` + Comparison string `json:"comparison"` + Notifications *Notifications `json:"notifications"` + Period string `json:"period"` +} + +// Notifications represents a DigitalOcean Notifications configuration. +type Notifications struct { + Email []string `json:"email"` + Slack []SlackDetails `json:"slack"` +} + +// UptimeCheckState represents a DigitalOcean Uptime Check's state configuration. 
+type UptimeCheckState struct { + Regions map[string]UptimeRegion `json:"regions"` + PreviousOutage UptimePreviousOutage `json:"previous_outage"` +} + +type UptimeRegion struct { + Status string `json:"status"` + StatusChangedAt string `json:"status_changed_at"` + ThirtyDayUptimePercentage float32 `json:"thirty_day_uptime_percentage"` +} + +// UptimePreviousOutage represents a DigitalOcean Uptime Check's previous outage configuration. +type UptimePreviousOutage struct { + Region string `json:"region"` + StartedAt string `json:"started_at"` + EndedAt string `json:"ended_at"` + DurationSeconds int `json:"duration_seconds"` +} + +// CreateUptimeCheckRequest represents the request to create a new uptime check. +type CreateUptimeCheckRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Target string `json:"target"` + Regions []string `json:"regions"` + Enabled bool `json:"enabled"` +} + +// UpdateUptimeCheckRequest represents the request to update uptime check information. +type UpdateUptimeCheckRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Target string `json:"target"` + Regions []string `json:"regions"` + Enabled bool `json:"enabled"` +} + +// CreateUptimeUptimeAlertRequest represents the request to create a new Uptime Alert. +type CreateUptimeAlertRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Threshold int `json:"threshold"` + Comparison string `json:"comparison"` + Notifications *Notifications `json:"notifications"` + Period string `json:"period"` +} + +// UpdateUptimeAlertRequest represents the request to create a new alert. +type UpdateUptimeAlertRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Threshold int `json:"threshold"` + Comparison string `json:"comparison"` + Notifications *Notifications `json:"notifications"` + Period string `json:"period"` +} + +type uptimeChecksRoot struct { + UptimeChecks []UptimeCheck `json:"checks"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type uptimeCheckStateRoot struct { + UptimeCheckState UptimeCheckState `json:"state"` +} + +type uptimeAlertsRoot struct { + UptimeAlerts []UptimeAlert `json:"alerts"` + Links *Links `json:"links"` + Meta *Meta `json:"meta"` +} + +type uptimeCheckRoot struct { + UptimeCheck *UptimeCheck `json:"check"` +} + +type uptimeAlertRoot struct { + UptimeAlert *UptimeAlert `json:"alert"` +} + +var _ UptimeChecksService = &UptimeChecksServiceOp{} + +// List Checks. +func (p *UptimeChecksServiceOp) List(ctx context.Context, opts *ListOptions) ([]UptimeCheck, *Response, error) { + path, err := addOptions(uptimeChecksBasePath, opts) + if err != nil { + return nil, nil, err + } + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(uptimeChecksRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.UptimeChecks, resp, err +} + +// GetState of uptime check. 
+func (p *UptimeChecksServiceOp) GetState(ctx context.Context, uptimeCheckID string) (*UptimeCheckState, *Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/state") + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(uptimeCheckStateRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return &root.UptimeCheckState, resp, err +} + +// Get retrieves a single uptime check by its ID. +func (p *UptimeChecksServiceOp) Get(ctx context.Context, uptimeCheckID string) (*UptimeCheck, *Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID) + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(uptimeCheckRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeCheck, resp, err +} + +// Create a new uptime check. +func (p *UptimeChecksServiceOp) Create(ctx context.Context, cr *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) { + req, err := p.client.NewRequest(ctx, http.MethodPost, uptimeChecksBasePath, cr) + if err != nil { + return nil, nil, err + } + + root := new(uptimeCheckRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeCheck, resp, err +} + +// Update an uptime check. +func (p *UptimeChecksServiceOp) Update(ctx context.Context, uptimeCheckID string, ur *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID) + req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur) + if err != nil { + return nil, nil, err + } + + root := new(uptimeCheckRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeCheck, resp, err +} + +// Delete an existing uptime check. +func (p *UptimeChecksServiceOp) Delete(ctx context.Context, uptimeCheckID string) (*Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID) + req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return p.client.Do(ctx, req, nil) +} + +// alerts + +// ListAlerts lists alerts for a check. +func (p *UptimeChecksServiceOp) ListAlerts(ctx context.Context, uptimeCheckID string, opts *ListOptions) ([]UptimeAlert, *Response, error) { + fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts") + path, err := addOptions(fullPath, opts) + if err != nil { + return nil, nil, err + } + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(uptimeAlertsRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + if m := root.Meta; m != nil { + resp.Meta = m + } + + return root.UptimeAlerts, resp, err +} + +// CreateAlert creates a new check alert. 
+func (p *UptimeChecksServiceOp) CreateAlert(ctx context.Context, uptimeCheckID string, cr *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) { + fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts") + req, err := p.client.NewRequest(ctx, http.MethodPost, fullPath, cr) + if err != nil { + return nil, nil, err + } + + root := new(uptimeAlertRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeAlert, resp, err +} + +// GetAlert retrieves a single uptime check alert by its ID. +func (p *UptimeChecksServiceOp) GetAlert(ctx context.Context, uptimeCheckID string, alertID string) (*UptimeAlert, *Response, error) { + path := fmt.Sprintf("v2/uptime/checks/%s/alerts/%s", uptimeCheckID, alertID) + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(uptimeAlertRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeAlert, resp, err +} + +// UpdateAlert updates an check's alert. +func (p *UptimeChecksServiceOp) UpdateAlert(ctx context.Context, uptimeCheckID string, alertID string, ur *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID) + req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur) + if err != nil { + return nil, nil, err + } + + root := new(uptimeAlertRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.UptimeAlert, resp, err +} + +// DeleteAlert deletes an existing check's alert. +func (p *UptimeChecksServiceOp) DeleteAlert(ctx context.Context, uptimeCheckID string, alertID string) (*Response, error) { + path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID) + req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return p.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 4bc91cffd6e5..63db4c617b73 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -48,7 +48,7 @@ func (args Args) Keys() []string { // MarshalJSON returns a JSON byte representation of the Args func (args Args) MarshalJSON() ([]byte, error) { if len(args.fields) == 0 { - return []byte{}, nil + return []byte("{}"), nil } return json.Marshal(args.fields) } @@ -106,9 +106,6 @@ func FromJSON(p string) (Args, error) { // UnmarshalJSON populates the Args from JSON encode bytes func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } return json.Unmarshal(raw, &args.fields) } diff --git a/vendor/github.com/efficientgo/core/COPYRIGHT b/vendor/github.com/efficientgo/core/COPYRIGHT new file mode 100644 index 000000000000..0b803fd346d9 --- /dev/null +++ b/vendor/github.com/efficientgo/core/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The EfficientGo Authors. +Licensed under the Apache License 2.0. 
\ No newline at end of file diff --git a/vendor/github.com/efficientgo/tools/core/LICENSE b/vendor/github.com/efficientgo/core/LICENSE similarity index 100% rename from vendor/github.com/efficientgo/tools/core/LICENSE rename to vendor/github.com/efficientgo/core/LICENSE diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go b/vendor/github.com/efficientgo/core/errcapture/do.go similarity index 89% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go rename to vendor/github.com/efficientgo/core/errcapture/do.go index c7176a475b24..40f9eb9d7eba 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/do.go +++ b/vendor/github.com/efficientgo/core/errcapture/do.go @@ -10,11 +10,10 @@ package errcapture import ( "io" - "io/ioutil" "os" - "github.com/efficientgo/tools/core/pkg/merrors" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/merrors" ) type doFunc func() error @@ -38,7 +37,7 @@ func Do(err *error, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with error capture but exhausts the reader before. func ExhaustClose(err *error, r io.ReadCloser, format string, a ...interface{}) { - _, copyErr := io.Copy(ioutil.Discard, r) + _, copyErr := io.Copy(io.Discard, r) Do(err, r.Close, format, a...) diff --git a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go b/vendor/github.com/efficientgo/core/errcapture/doc.go similarity index 71% rename from vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go rename to vendor/github.com/efficientgo/core/errcapture/doc.go index 96c1c22918f4..ab7f2ed82bab 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/errcapture/doc.go +++ b/vendor/github.com/efficientgo/core/errcapture/doc.go @@ -1,29 +1,43 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package errcapture - +// Package errcapture implements robust error handling in defer statements using provided return argument. +// // Close a `io.Closer` interface or execute any function that returns error safely while capturing error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `errcapture` provides utility functions to capture error and add to provided one, // still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer errcapture.Do(&err, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer errcapture.Do(&err, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `errcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File) (err error) { +// defer errcapture.Do(&err, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The errcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. 
This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/logerrcapture if you want to just log an error instead. +package errcapture diff --git a/vendor/github.com/efficientgo/core/errors/doc.go b/vendor/github.com/efficientgo/core/errors/doc.go new file mode 100644 index 000000000000..c489b2e2f275 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/doc.go @@ -0,0 +1,13 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +// Package errors provides basic utilities to manipulate errors with a useful stacktrace. It combines the +// benefits of errors.New and fmt.Errorf world into a single package. It is an improved and minimal version of the popular +// https://github.com/pkg/errors (archived) package allowing reliable wrapping of errors with stacktrace. Unfortunately, +// the standard library recommended error wrapping using `%+w` is prone to errors and does not support stacktraces. +package errors diff --git a/vendor/github.com/efficientgo/core/errors/errors.go b/vendor/github.com/efficientgo/core/errors/errors.go new file mode 100644 index 000000000000..ab032cfe1057 --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/errors.go @@ -0,0 +1,168 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "errors" + "fmt" + "strings" +) + +// base is the fundamental struct that implements the error interface and the acts as the backbone of this errors package. +type base struct { + // info contains the error message passed through calls like errors.Wrap, errors.New. + info string + // stacktrace stores information about the program counters - i.e. where this error was generated. + stack stacktrace + // err is the actual error which is being wrapped with a stacktrace and message information. + err error +} + +// Error implements the error interface. +func (b *base) Error() string { + if b.err != nil { + return fmt.Sprintf("%s: %s", b.info, b.err.Error()) + } + return b.info +} + +// Unwrap implements the error Unwrap interface. +func (b *base) Unwrap() error { + return b.err +} + +// Format implements the fmt.Formatter interface to support the formatting of an error chain with "%+v" verb. +// Whenever error is printed with %+v format verb, stacktrace info gets dumped to the output. +func (b *base) Format(s fmt.State, verb rune) { + if verb == 'v' && s.Flag('+') { + s.Write([]byte(formatErrorChain(b))) + return + } + + s.Write([]byte(b.Error())) +} + +// New returns a new error with a stacktrace of recent call frames. Each call to New +// returns a distinct error value even if the text is identical. An alternative of +// the errors.New function from github.com/pkg/errors. +func New(message string) error { + return &base{ + info: message, + stack: newStackTrace(), + err: nil, + } +} + +// Newf is like New, but it formats input according to a format specifier. +// An alternative of the fmt.Errorf function. 
+// +// If no args have been passed, it is same as `New` function without formatting. Character like +// '%' still has to be escaped in that scenario. +func Newf(format string, args ...interface{}) error { + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: nil, + } +} + +// Wrap returns a new error, which wraps another error with a stacktrace containing recent call frames. +// +// If cause is nil, it does not produce the error, similar to errors.Wrap from github.com/pkg/errors was doing. +// Still, avoid `errors.Wrap(nil, "...") patterns as it can lead to inefficiencies while constructing arguments +// to format as well readability issues. We keep this behaviour to make sure it's a drop-in replacement. +func Wrap(cause error, message string) error { + if cause == nil { + return nil + } + + return &base{ + info: message, + stack: newStackTrace(), + err: cause, + } +} + +// Wrapf is like Wrap but the message is formatted with the supplied format specifier. +// +// If no args have been passed, it is same as `Wrap` function without formatting. +// Character like '%' still has to be escaped in that scenario. +func Wrapf(cause error, format string, args ...interface{}) error { + if cause == nil { + return nil + } + return &base{ + info: fmt.Sprintf(format, args...), + stack: newStackTrace(), + err: cause, + } +} + +// Cause returns the result of repeatedly calling the Unwrap method on err, if err's +// type implements an Unwrap method. Otherwise, Cause returns the last encountered error. +// The difference between Unwrap and Cause is the first one performs unwrapping of one level +// but Cause returns the last err (whether it's nil or not) where it failed to assert +// the interface containing the Unwrap method. +// This is a replacement of errors.Cause without the causer interface from pkg/errors which +// actually can be sufficed through the errors.Is function. But considering some use cases +// where we need to peel off all the external layers applied through errors.Wrap family, +// it is useful ( where external SDK doesn't use errors.Is internally). +func Cause(err error) error { + for err != nil { + e, ok := err.(interface { + Unwrap() error + }) + if !ok { + return err + } + err = e.Unwrap() + } + return nil +} + +// formatErrorChain formats an error chain. +func formatErrorChain(err error) string { + var buf strings.Builder + for err != nil { + if e, ok := err.(*base); ok { + buf.WriteString(fmt.Sprintf("%s\n%v", e.info, e.stack)) + err = e.err + } else { + buf.WriteString(fmt.Sprintf("%s\n", err.Error())) + err = nil + } + } + return buf.String() +} + +// The functions `Is`, `As` & `Unwrap` provides a thin wrapper around the builtin errors +// package in go. Just for sake of completeness and correct autocompletion behaviors from +// IDEs they have been wrapped using functions instead of using variable to reference them +// as first class functions (eg: var Is = errros.Is ). + +// Is is a wrapper of built-in errors.Is. It reports whether any error in err's +// chain matches target. The chain consists of err itself followed by the sequence +// of errors obtained by repeatedly calling Unwrap. +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// As is a wrapper of built-in errors.As. It finds the first error in err's +// chain that matches target, and if one is found, sets target to that error +// value and returns true. Otherwise, it returns false. 
+func As(err error, target interface{}) bool { + return errors.As(err, target) +} + +// Unwrap is a wrapper of built-in errors.Unwrap. Unwrap returns the result of +// calling the Unwrap method on err, if err's type contains an Unwrap method +// returning error. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return errors.Unwrap(err) +} diff --git a/vendor/github.com/efficientgo/core/errors/stacktrace.go b/vendor/github.com/efficientgo/core/errors/stacktrace.go new file mode 100644 index 000000000000..a479977d812e --- /dev/null +++ b/vendor/github.com/efficientgo/core/errors/stacktrace.go @@ -0,0 +1,58 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Initially copied from Thanos and contributed by https://github.com/bisakhmondal. +// +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package errors + +import ( + "fmt" + "runtime" + "strings" +) + +// stacktrace holds a snapshot of program counters. +type stacktrace []uintptr + +// newStackTrace captures a stack trace. It skips first 3 frames to record the +// snapshot of the stack trace at the origin of a particular error. It tries to +// record maximum 16 frames (if available). +func newStackTrace() stacktrace { + const stackDepth = 16 // record maximum 16 frames (if available). + + pc := make([]uintptr, stackDepth) + // using skip=3 for not to count the program counter address of + // 1. the respective function from errors package (eg. errors.New) + // 2. newStacktrace itself + // 3. the function used in runtime.Callers + n := runtime.Callers(3, pc) + + // this approach is taken to reduce long term memory footprint (obtained through escape analysis). + // We are returning a new slice by re-slicing the pc with the required length and capacity (when the + // no of returned callFrames is less that stackDepth). This uses less memory compared to pc[:n] as + // the capacity of new slice is inherited from the parent slice if not specified. + return pc[:n:n] +} + +// String implements the fmt.Stringer interface to provide formatted text output. +func (s stacktrace) String() string { + var buf strings.Builder + + // CallersFrames takes the slice of Program Counter addresses returned by Callers to + // retrieve function/file/line information. + cf := runtime.CallersFrames(s) + for { + // more indicates if the next call will be successful or not. + frame, more := cf.Next() + // used formatting scheme <`>`space><:> for example: + // > testing.tRunner /home/go/go1.17.8/src/testing/testing.go:1259 + buf.WriteString(fmt.Sprintf("> %s\t%s:%d\n", frame.Func.Name(), frame.File, frame.Line)) + if !more { + break + } + } + return buf.String() +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go b/vendor/github.com/efficientgo/core/logerrcapture/do.go similarity index 94% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go rename to vendor/github.com/efficientgo/core/logerrcapture/do.go index 41662b901387..6bad9fa6ebac 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/do.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/do.go @@ -11,10 +11,9 @@ package logerrcapture import ( "fmt" "io" - "io/ioutil" "os" - "github.com/pkg/errors" + "github.com/efficientgo/core/errors" ) // Logger interface compatible with go-kit/logger. 
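As an aside for reviewers: the github.com/efficientgo/core/errors package vendored above is a small stacktrace-aware replacement for the archived github.com/pkg/errors. A minimal sketch of typical downstream usage, not part of this patch, with a hypothetical openConfig helper and file name, might look like this:

package main

import (
	"fmt"
	"os"

	"github.com/efficientgo/core/errors"
)

// openConfig wraps any failure with a message and a captured stacktrace.
func openConfig(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		// Wrapf records the call stack at this point; it returns nil when err is nil.
		return nil, errors.Wrapf(err, "open config %s", path)
	}
	return f, nil
}

func main() {
	if _, err := openConfig("example.yaml"); err != nil {
		// Printing with %+v dumps the message chain together with the stacktrace.
		fmt.Printf("%+v\n", err)
	}
}

Printing the error with a plain %v verb yields only the wrapped message chain, which is the behaviour the package documents for its Format method.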
@@ -42,7 +41,7 @@ func Do(logger Logger, doer doFunc, format string, a ...interface{}) { // ExhaustClose closes the io.ReadCloser with a log message on error but exhausts the reader before. func ExhaustClose(logger Logger, r io.ReadCloser, format string, a ...interface{}) { - _, err := io.Copy(ioutil.Discard, r) + _, err := io.Copy(io.Discard, r) if err != nil { _ = logger.Log("msg", "failed to exhaust reader, performance may be impeded", "err", err) } diff --git a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go b/vendor/github.com/efficientgo/core/logerrcapture/doc.go similarity index 66% rename from vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go rename to vendor/github.com/efficientgo/core/logerrcapture/doc.go index 414efc995827..90738562cebf 100644 --- a/vendor/github.com/efficientgo/tools/core/pkg/logerrcapture/doc.go +++ b/vendor/github.com/efficientgo/core/logerrcapture/doc.go @@ -1,30 +1,44 @@ // Copyright (c) The EfficientGo Authors. // Licensed under the Apache License 2.0. -package logerrcapture - -// Close a `io.Closer` interface or execute any function that returns error safely while logging error. +// Package logerrcapture implements robust error handling in defer statements using provided logger. +// +// The Close a `io.Closer` interface or execute any function that returns error safely while logging error. // It's often forgotten but it's a caller responsibility to close all implementations of `Closer`, // such as *os.File or io.ReaderCloser. Commonly we would use: // -// defer closer.Close() +// defer closer.Close() // // This is wrong. Close() usually return important error (e.g for os.File the actual file flush might happen and fail on `Close` method). // It's very important to *always* check error. `logerrcapture` provides utility functions to capture error and log it via provided // logger, while still allowing to put them in a convenient `defer` statement: // -// func <...>(...) (err error) { -// ... -// defer logerrcapture.Do(logger, closer.Close, "log format message") +// func <...>(...) (err error) { +// ... +// defer logerrcapture.Do(logger, closer.Close, "log format message") // -// ... -// } +// ... +// } // // If Close returns error, `logerrcapture.Do` will capture it, add to input error if not nil and return by argument. // +// Example: +// +// func DoAndClose(f *os.File, logger logerrcapture.Logger) error { +// defer logerrcapture.Do(logger, f.Close, "close file at the end") +// +// // Do something... +// if err := do(); err != nil { +// return err +// } +// +// return nil +// } +// // The logerrcapture.ExhaustClose function provide the same functionality but takes an io.ReadCloser and exhausts the whole // reader before closing. This is useful when trying to use http keep-alive connections because for the same connection // to be re-used the whole response body needs to be exhausted. // // Recommended: Check https://pkg.go.dev/github.com/efficientgo/tools/pkg/errcapture if you want to return error instead of just logging (causing // hard error). +package logerrcapture diff --git a/vendor/github.com/efficientgo/core/merrors/doc.go b/vendor/github.com/efficientgo/core/merrors/doc.go new file mode 100644 index 000000000000..a670493f46a1 --- /dev/null +++ b/vendor/github.com/efficientgo/core/merrors/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package merrors implements multi error implementation that chains errors on the same level. 
+// Supports errors.As and errors.Is functions. +// +// Example 1: +// +// return merrors.New(err1, err2).Err() +// +// Example 2: +// +// merr := merrors.New(err1) +// merr.Add(err2, errOrNil3) +// for _, err := range errs { +// merr.Add(err) +// } +// return merr.Err() +// +// Example 3: +// +// func CloseAll(closers []io.Closer) error { +// errs := merrors.New() +// for _ , c := range closers { +// errs.Add(c.Close()) +// } +// return errs.Err() +// } +package merrors diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go b/vendor/github.com/efficientgo/core/merrors/merrors.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/merrors/errors.go rename to vendor/github.com/efficientgo/core/merrors/merrors.go diff --git a/vendor/github.com/efficientgo/core/testutil/doc.go b/vendor/github.com/efficientgo/core/testutil/doc.go new file mode 100644 index 000000000000..ac4ca1df5510 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// Package testutil is a minimal testing utility with only few functions like `Assert`, `Ok`, `NotOk` for errors and `Equals`. +// It also provides TB ("TestOrBench") utility for union of testing and benchmarks. +package testutil diff --git a/vendor/github.com/efficientgo/core/testutil/internal/difflib.go b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go new file mode 100644 index 000000000000..aea79d215434 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/internal/difflib.go @@ -0,0 +1,645 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no loger maintained. + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. 
This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "<P>
    " lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// +// and for all (i',j',k') meeting those conditions, +// +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go b/vendor/github.com/efficientgo/core/testutil/testorbench.go similarity index 100% rename from vendor/github.com/efficientgo/tools/core/pkg/testutil/testorbench.go rename to vendor/github.com/efficientgo/core/testutil/testorbench.go diff --git a/vendor/github.com/efficientgo/core/testutil/testutil.go b/vendor/github.com/efficientgo/core/testutil/testutil.go new file mode 100644 index 000000000000..c88191bc10d4 --- /dev/null +++ b/vendor/github.com/efficientgo/core/testutil/testutil.go @@ -0,0 +1,233 @@ +// Copyright (c) The EfficientGo Authors. +// Licensed under the Apache License 2.0. + +package testutil + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "runtime/debug" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/efficientgo/core/errors" + "github.com/efficientgo/core/testutil/internal" + "github.com/google/go-cmp/cmp" +) + +const limitOfElemChars = 1e3 + +func withLimitf(f string, v ...interface{}) string { + s := fmt.Sprintf(f, v...) + if len(s) > limitOfElemChars { + return s[:limitOfElemChars] + "...(output trimmed)" + } + return s +} + +// Assert fails the test if the condition is false. 
+func Assert(tb testing.TB, condition bool, v ...interface{}) { + tb.Helper() + if condition { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) +} + +// Ok fails the test if an err is not nil. +func Ok(tb testing.TB, err error, v ...interface{}) { + tb.Helper() + if err == nil { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, withLimitf(msg), withLimitf(err.Error())) +} + +// NotOk fails the test if an err is nil. +func NotOk(tb testing.TB, err error, v ...interface{}) { + tb.Helper() + if err != nil { + return + } + _, file, line, _ := runtime.Caller(1) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf("\033[31m%s:%d: \"%s\"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line, withLimitf(msg)) +} + +// Equals fails the test if exp is not equal to act. +func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { + tb.Helper() + if reflect.DeepEqual(exp, act) { + return + } + fatalNotEqual(tb, exp, act, v...) +} + +func fatalNotEqual(tb testing.TB, exp, act interface{}, v ...interface{}) { + _, file, line, _ := runtime.Caller(2) + + var msg string + if len(v) > 0 { + msg = fmt.Sprintf(v[0].(string), v[1:]...) + } + tb.Fatalf( + "\033[31m%s:%d: \"%s\"\n\n\texp: %s\n\n\tgot: %s%s\033[39m\n\n", + filepath.Base(file), line, withLimitf(msg), withLimitf("%#v", exp), withLimitf("%#v", act), withLimitf(diff(exp, act)), + ) +} + +type goCmp struct { + opts cmp.Options +} + +// WithGoCmp allows specifying options and using https://github.com/google/go-cmp +// for equality comparisons. The compatibility guarantee of this function's arguments +// are the same as go-cmp (no guarantee due to v0.x). +func WithGoCmp(opts ...cmp.Option) goCmp { + return goCmp{opts: opts} +} + +// Equals uses go-cmp for comparing equality between two structs, and can be used with +// various options defined in go-cmp/cmp and go-cmp/cmp/cmpopts. +func (o goCmp) Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { + tb.Helper() + if cmp.Equal(exp, act, o.opts) { + return + } + fatalNotEqual(tb, exp, act, v...) +} + +// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. +func FaultOrPanicToErr(f func()) (err error) { + // Set this go routine to panic on segfault to allow asserting on those. + debug.SetPanicOnFault(true) + defer func() { + if r := recover(); r != nil { + err = errors.Newf("invoked function panicked or caused segmentation fault: %v", r) + } + debug.SetPanicOnFault(false) + }() + + f() + + return err +} + +// ContainsStringSlice fails the test if needle is not contained within haystack, if haystack or needle is +// an empty slice, or if needle is longer than haystack. 
+func ContainsStringSlice(tb testing.TB, haystack, needle []string) { + _, file, line, _ := runtime.Caller(1) + + if !contains(haystack, needle) { + tb.Fatalf("\033[31m%s:%d: %s does not contain %s\033[39m\n\n", filepath.Base(file), line, withLimitf("%#v", haystack), withLimitf("%#v", needle)) + } +} + +func contains(haystack, needle []string) bool { + if len(haystack) == 0 || len(needle) == 0 { + return false + } + + if len(haystack) < len(needle) { + return false + } + + for i := 0; i < len(haystack); i++ { + outer := i + + for j := 0; j < len(needle); j++ { + // End of the haystack but not the end of the needle, end + if outer == len(haystack) { + return false + } + + // No match, try the next index of the haystack + if haystack[outer] != needle[j] { + break + } + + // End of the needle and it still matches, end + if j == len(needle)-1 { + return true + } + + // This element matches between the two slices, try the next one + outer++ + } + } + + return false +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + c := spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + } + if et != reflect.TypeOf("") { + e = c.Sdump(expected) + a = c.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } + + diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ + A: internal.SplitLines(e), + B: internal.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + return "\n\nDiff:\n" + diff +} diff --git a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go deleted file mode 100644 index d1eaf8333cb1..000000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/merrors/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package merrors - -// Safe multi error implementation that chains errors on the same level. Supports errors.As and errors.Is functions. -// -// Example 1: -// -// return merrors.New(err1, err2).Err() -// -// Example 2: -// -// merr := merrors.New(err1) -// merr.Add(err2, errOrNil3) -// for _, err := range errs { -// merr.Add(err) -// } -// return merr.Err() -// diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go deleted file mode 100644 index dc9f63f1b0ab..000000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -// Simplistic assertion helpers for testing code. TestOrBench utils for union of testing and benchmarks. 
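For completeness, the new github.com/efficientgo/core/testutil helpers shown above replace the tools/core/pkg/testutil package being removed below. A hedged sketch of how a test might use them together with the new errors package (the test name and values are hypothetical):

package example_test

import (
	"testing"

	"github.com/efficientgo/core/errors"
	"github.com/efficientgo/core/testutil"
)

func TestWrapBehaviour(t *testing.T) {
	// Wrapping a nil error stays nil, mirroring github.com/pkg/errors.
	testutil.Ok(t, errors.Wrap(nil, "ignored"))

	wrapped := errors.Wrap(errors.New("boom"), "wrapped")
	// NotOk asserts that an error is present.
	testutil.NotOk(t, wrapped)
	// Equals prints a unified diff of expected vs. actual on failure.
	testutil.Equals(t, "wrapped: boom", wrapped.Error())
}

Note that Equals builds its failure diff with the vendored internal difflib copy, which is why GetUnifiedDiffString was carried over from the no-longer-maintained go-difflib repository.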
diff --git a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go b/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go deleted file mode 100644 index 8a46d2e68f50..000000000000 --- a/vendor/github.com/efficientgo/tools/core/pkg/testutil/testutil.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) The EfficientGo Authors. -// Licensed under the Apache License 2.0. - -package testutil - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "runtime/debug" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" - "github.com/pmezard/go-difflib/difflib" - "go.uber.org/goleak" -) - -// Assert fails the test if the condition is false. -func Assert(tb testing.TB, condition bool, v ...interface{}) { - tb.Helper() - if condition { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d: "+msg+"\033[39m\n\n", filepath.Base(file), line) -} - -// Ok fails the test if an err is not nil. -func Ok(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err == nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) -} - -// NotOk fails the test if an err is nil. -func NotOk(tb testing.TB, err error, v ...interface{}) { - tb.Helper() - if err != nil { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatalf("\033[31m%s:%d:"+msg+"\n\n expected error, got nothing \033[39m\n\n", filepath.Base(file), line) -} - -// Equals fails the test if exp is not equal to act. -func Equals(tb testing.TB, exp, act interface{}, v ...interface{}) { - tb.Helper() - if reflect.DeepEqual(exp, act) { - return - } - _, file, line, _ := runtime.Caller(1) - - var msg string - if len(v) > 0 { - msg = fmt.Sprintf(v[0].(string), v[1:]...) - } - tb.Fatal(sprintfWithLimit("\033[31m%s:%d:"+msg+"\n\n\texp: %#v\n\n\tgot: %#v%s\033[39m\n\n", filepath.Base(file), line, exp, act, diff(exp, act))) -} - -func sprintfWithLimit(act string, v ...interface{}) string { - s := fmt.Sprintf(act, v...) - if len(s) > 10000 { - return s[:10000] + "...(output trimmed)" - } - return s -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - return "\n\nDiff:\n" + diff -} - -// TolerantVerifyLeakMain verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeakMain(m *testing.M) { - goleak.VerifyTestMain(m, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// TolerantVerifyLeak verifies go leaks but excludes the go routines that are -// launched as side effects of some of our dependencies. -func TolerantVerifyLeak(t *testing.T) { - goleak.VerifyNone(t, - // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 - goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"), - ) -} - -// FaultOrPanicToErr returns error if panic of fault was triggered during execution of function. -func FaultOrPanicToErr(f func()) (err error) { - // Set this go routine to panic on segfault to allow asserting on those. - debug.SetPanicOnFault(true) - defer func() { - if r := recover(); r != nil { - err = errors.Errorf("invoked function panicked or caused segmentation fault: %v", r) - } - debug.SetPanicOnFault(false) - }() - - f() - - return err -} diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml index b791a74213c2..b16d040fa89e 100644 --- a/vendor/github.com/gofrs/flock/.travis.yml +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.10.x - - 1.11.x + - 1.14.x + - 1.15.x script: go test -v -check.vv -race ./... sudo: false notifications: diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE index aff7d358e246..8b8ff36fe426 100644 --- a/vendor/github.com/gofrs/flock/LICENSE +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015, Tim Heckman +Copyright (c) 2015-2020, Tim Heckman All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -11,9 +11,9 @@ modification, are permitted provided that the following conditions are met: this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -* Neither the name of linode-netint nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +* Neither the name of gofrs nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md index 7375e72eeb45..71ce63692e8e 100644 --- a/vendor/github.com/gofrs/flock/README.md +++ b/vendor/github.com/gofrs/flock/README.md @@ -1,6 +1,6 @@ # flock [![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) -[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) [![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) [![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml index 6848e94bf886..909b4bf7cb4e 100644 --- a/vendor/github.com/gofrs/flock/appveyor.yml +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -7,7 +7,7 @@ clone_folder: 'c:\gopath\src\github.com\gofrs\flock' environment: GOPATH: 'c:\gopath' - GOVERSION: '1.11' + GOVERSION: '1.15' init: - git config --global core.autocrlf input diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go index 8f109b8a967b..95c784ca504b 100644 --- a/vendor/github.com/gofrs/flock/flock.go +++ b/vendor/github.com/gofrs/flock/flock.go @@ -19,6 +19,7 @@ package flock import ( "context" "os" + "runtime" "sync" "time" ) @@ -116,7 +117,15 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati func (f *Flock) setFh() error { // open a new os.File instance // create it if it doesn't exist, and open the file read-only. - fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) + flags := os.O_CREATE + if runtime.GOOS == "aix" { + // AIX cannot preform write-lock (ie exclusive) on a + // read-only file. + flags |= os.O_RDWR + } else { + flags |= os.O_RDONLY + } + fh, err := os.OpenFile(f.path, flags, os.FileMode(0600)) if err != nil { return err } @@ -125,3 +134,11 @@ func (f *Flock) setFh() error { f.fh = fh return nil } + +// ensure the file handle is closed if no lock is held +func (f *Flock) ensureFhState() { + if !f.l && !f.r && f.fh != nil { + f.fh.Close() + f.fh = nil + } +} diff --git a/vendor/github.com/gofrs/flock/flock_aix.go b/vendor/github.com/gofrs/flock/flock_aix.go new file mode 100644 index 000000000000..7277c1b6b265 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_aix.go @@ -0,0 +1,281 @@ +// Copyright 2019 Tim Heckman. All rights reserved. 
Use of this source code is +// governed by the BSD 3-Clause license that can be found in the LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// This code is adapted from the Go package: +// cmd/go/internal/lockedfile/internal/filelock + +//+build aix + +package flock + +import ( + "errors" + "io" + "os" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +type lockType int16 + +const ( + readLock lockType = unix.F_RDLCK + writeLock lockType = unix.F_WRLCK +) + +type cmdType int + +const ( + tryLock cmdType = unix.F_SETLK + waitLock cmdType = unix.F_SETLKW +) + +type inode = uint64 + +type inodeLock struct { + owner *Flock + queue []<-chan *Flock +} + +var ( + mu sync.Mutex + inodes = map[*Flock]inode{} + locks = map[inode]inodeLock{} +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, writeLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, readLock) +} + +func (f *Flock) lock(locked *bool, flag lockType) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + defer f.ensureFhState() + } + + if _, err := f.doLock(waitLock, flag, true); err != nil { + return err + } + + *locked = true + return nil +} + +func (f *Flock) doLock(cmd cmdType, lt lockType, blocking bool) (bool, error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. 
+ fi, err := f.fh.Stat() + if err != nil { + return false, err + } + ino := inode(fi.Sys().(*syscall.Stat_t).Ino) + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return false, &os.PathError{ + Path: f.Path(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + + inodes[f] = ino + + var wait chan *Flock + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else if !blocking { + // Already owned: cannot take the lock. + mu.Unlock() + return false, nil + } else { + // Already owned: add a channel to wait on. + wait = make(chan *Flock) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + err = setlkw(f.fh.Fd(), cmd, lt) + + if err != nil { + f.doUnlock() + if cmd == tryLock && err == unix.EACCES { + return false, nil + } + return false, err + } + + return true, nil +} + +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + if err := f.doUnlock(); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +func (f *Flock) doUnlock() (err error) { + var owner *Flock + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner == f { + err = setlkw(f.fh.Fd(), waitLock, unix.F_UNLCK) + } + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, writeLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. 
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, readLock) +} + +func (f *Flock) try(locked *bool, flag lockType) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + defer f.ensureFhState() + } + + haslock, err := f.doLock(tryLock, flag, false) + if err != nil { + return false, err + } + + *locked = haslock + return haslock, nil +} + +// setlkw calls FcntlFlock with cmd for the entire file indicated by fd. +func setlkw(fd uintptr, cmd cmdType, lt lockType) error { + for { + err := unix.FcntlFlock(fd, int(cmd), &unix.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. + }) + if err != unix.EINTR { + return err + } + } +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go index 45f71a707c3f..c315a3e29084 100644 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by the BSD 3-Clause // license that can be found in the LICENSE file. -// +build !windows +// +build !aix,!windows package flock @@ -51,6 +51,7 @@ func (f *Flock) lock(locked *bool, flag int) error { if err := f.setFh(); err != nil { return err } + defer f.ensureFhState() } if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { @@ -142,6 +143,7 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) { if err := f.setFh(); err != nil { return false, err } + defer f.ensureFhState() } var retried bool diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go index 9f4a5f10d246..ddb534ccef09 100644 --- a/vendor/github.com/gofrs/flock/flock_windows.go +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -46,6 +46,7 @@ func (f *Flock) lock(locked *bool, flag uint32) error { if err := f.setFh(); err != nil { return err } + defer f.ensureFhState() } if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { @@ -122,6 +123,7 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) { if err := f.setFh(); err != nil { return false, err } + defer f.ensureFhState() } _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index f5d551ca8fd8..30f2f2a6f70c 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -54,9 +54,9 @@ import "github.com/golang-jwt/jwt/v4" See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: -* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) -* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) -* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples) ## Extensions @@ -96,7 +96,7 @@ A token is simply a JSON object that is signed by its author. 
this tells you exa * The author of the token was in the possession of the signing secret * The data has not been modified since it was signed -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims at a (very) experimental implementation of the JWE standard. ### Choosing a Signing Method @@ -110,10 +110,10 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: -* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation -* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation ### JWT and OAuth @@ -131,7 +131,7 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. 
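A minimal usage sketch of the `jwt/v4` API the updated README links point to, showing the HS256 round trip with the `[]byte` key type the HMAC signing methods expect (the secret and claim values here are illustrative only):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("example-secret") // HMAC signing methods expect []byte keys

	// Build and sign a token with a couple of registered claims.
	claims := jwt.RegisteredClaims{
		Issuer:    "example-issuer",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse and validate it again; the key func returns the verification key.
	// Per the ParseWithClaims note added below, the claims argument is an
	// allocated pointer, not a nil embedded pointer.
	parsed, err := jwt.ParseWithClaims(signed, &jwt.RegisteredClaims{}, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", parsed.Valid)
}
```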
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go index 9d95cad2bf27..364cec8773cd 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/claims.go +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -265,9 +265,5 @@ func verifyIss(iss string, cmp string, required bool) bool { if iss == "" { return !required } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 } diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index 2f61a69d7fcb..c0a6f6927917 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -42,6 +42,13 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go index 3cb0f3f0e4c0..71e909ea6538 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/token.go +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -99,6 +99,11 @@ func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token return NewParser(options...).Parse(tokenString, keyFunc) } +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) } diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index bec4c2494e82..701f2385b684 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -93,7 +93,8 @@ const ( AVX512VNNI // AVX-512 Vector Neural Network Instructions AVX512VP2INTERSECT // AVX-512 Intersect for D/Q AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one. 
+ AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking @@ -102,6 +103,7 @@ const ( CLMUL // Carry-less Multiplication CLZERO // CLZERO instruction supported CMOV // i686 CMOV + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB CMPXCHG8 // CMPXCHG8 instruction CPBOOST // Core Performance Boost CX16 // CMPXCHG16B Instruction @@ -112,8 +114,9 @@ const ( FMA4 // Bulldozer FMA4 functions FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 FXSROPT // FXSAVE/FXRSTOR optimizations - GFNI // Galois Field New Instructions + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR HTT // Hyperthreading (enabled) HWA // Hardware assert supported. Indicates support for MSRC001_10 HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors @@ -131,6 +134,8 @@ const ( INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization LZCNT // LZCNT instruction MCAOVERFLOW // MCA overflow recovery support. MCOMMIT // MCOMMIT instruction supported @@ -139,9 +144,11 @@ const ( MOVBE // MOVBE instruction (big-endian) MOVDIR64B // Move 64 Bytes as Direct Store MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB MPX // Intel MPX (Memory Protection Extensions) - MSR_PAGEFLUSH // Page Flush MSR available MSRIRC // Instruction Retired Counter MSR available + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT NX // NX (No-Execute) bit OSXSAVE // XSAVE enabled by OS PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption @@ -174,15 +181,25 @@ const ( SSE4A // AMD Barcelona microarchitecture SSE4a instructions SSSE3 // Conroe SSSE3 functions STIBP // Single Thread Indirect Branch Predictors + STOSB_SHORT // Fast short STOSB SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold TBM // AMD Trailing Bit Manipulation TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 TSXLDTRK // Intel TSX Suspend Load Address Tracking - VAES // Vector AES + VAES // Vector AES. AVX(512) versions requires additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. 
VMPL // AMD VM Permission Levels supported VMSA_REGPROT // AMD VMSA Register Protection supported VMX // Virtual Machine Extensions - VPCLMULQDQ // Carry-Less Multiplication Quadword + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. VTE // AMD Virtual Transparent Encryption supported WAITPKG // TPAUSE, UMONITOR, UMWAIT WBNOINVD // Write Back and Do Not Invalidate Cache @@ -219,7 +236,6 @@ const ( SM3 // SM3 instructions SM4 // SM4 instructions SVE // Scalable Vector Extension - // Keep it last. It automatically defines the size of []flagSet lastID @@ -1021,7 +1037,6 @@ func support() flagSet { // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. if mfi >= 7 { _, ebx, ecx, edx := cpuidex(7, 0) - eax1, _, _, _ := cpuidex(7, 1) if fs.inSet(AVX) && (ebx&0x00000020) != 0 { fs.set(AVX2) } @@ -1038,15 +1053,20 @@ func support() flagSet { fs.setIf(ebx&(1<<18) != 0, RDSEED) fs.setIf(ebx&(1<<19) != 0, ADX) fs.setIf(ebx&(1<<29) != 0, SHA) + // CPUID.(EAX=7, ECX=0).ECX fs.setIf(ecx&(1<<5) != 0, WAITPKG) fs.setIf(ecx&(1<<7) != 0, CETSS) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) fs.setIf(ecx&(1<<13) != 0, TME) fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) fs.setIf(ecx&(1<<27) != 0, MOVDIRI) fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) fs.setIf(ecx&(1<<29) != 0, ENQCMD) fs.setIf(ecx&(1<<30) != 0, SGXLC) + // CPUID.(EAX=7, ECX=0).EDX fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) fs.setIf(edx&(1<<14) != 0, SERIALIZE) @@ -1056,6 +1076,15 @@ func support() flagSet { fs.setIf(edx&(1<<26) != 0, IBPB) fs.setIf(edx&(1<<27) != 0, STIBP) + // CPUID.(EAX=7, ECX=1) + eax1, _, _, _ := cpuidex(7, 1) + fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) + fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) + fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) + fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<26) != 0, LAM) + // Only detect AVX-512 features if XGETBV is supported if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { // Check for OS support @@ -1080,9 +1109,6 @@ func support() flagSet { // ecx fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) - fs.setIf(ecx&(1<<8) != 0, GFNI) - fs.setIf(ecx&(1<<9) != 0, VAES) - fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) @@ -1125,13 +1151,17 @@ func support() flagSet { fs.set(LZCNT) fs.set(POPCNT) } + // ECX fs.setIf((c&(1<<0)) != 0, LAHF) + fs.setIf((c&(1<<2)) != 0, SVM) + fs.setIf((c&(1<<6)) != 0, SSE4A) fs.setIf((c&(1<<10)) != 0, IBS) + + // EDX fs.setIf((d&(1<<31)) != 0, AMD3DNOW) fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) fs.setIf((d&(1<<23)) != 0, MMX) fs.setIf((d&(1<<22)) != 0, MMXEXT) - fs.setIf((c&(1<<6)) != 0, SSE4A) fs.setIf(d&(1<<20) != 0, NX) fs.setIf(d&(1<<27) != 0, RDTSCP) @@ -1162,6 +1192,20 @@ func support() flagSet { fs.setIf((b&(1<<0)) != 0, CLZERO) } + if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A { + _, _, _, edx := cpuid(0x8000000A) + fs.setIf((edx>>0)&1 == 1, SVMNP) + fs.setIf((edx>>1)&1 == 1, LBRVIRT) + fs.setIf((edx>>2)&1 == 1, SVML) + fs.setIf((edx>>3)&1 == 1, NRIPS) + fs.setIf((edx>>4)&1 == 1, TSCRATEMSR) + fs.setIf((edx>>5)&1 == 1, VMCBCLEAN) + fs.setIf((edx>>6)&1 == 1, SVMFBASID) + fs.setIf((edx>>7)&1 == 1, SVMDA) + fs.setIf((edx>>10)&1 == 1, SVMPF) + fs.setIf((edx>>12)&1 == 1, SVMPFT) + } + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { eax, _, _, _ := 
cpuid(0x8000001b) fs.setIf((eax>>0)&1 == 1, IBSFFV) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 8cf57cae8b55..a9b3e36c707f 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -34,136 +34,153 @@ func _() { _ = x[AVX512VP2INTERSECT-24] _ = x[AVX512VPOPCNTDQ-25] _ = x[AVXSLOW-26] - _ = x[BMI1-27] - _ = x[BMI2-28] - _ = x[CETIBT-29] - _ = x[CETSS-30] - _ = x[CLDEMOTE-31] - _ = x[CLMUL-32] - _ = x[CLZERO-33] - _ = x[CMOV-34] - _ = x[CMPXCHG8-35] - _ = x[CPBOOST-36] - _ = x[CX16-37] - _ = x[ENQCMD-38] - _ = x[ERMS-39] - _ = x[F16C-40] - _ = x[FMA3-41] - _ = x[FMA4-42] - _ = x[FXSR-43] - _ = x[FXSROPT-44] - _ = x[GFNI-45] - _ = x[HLE-46] - _ = x[HTT-47] - _ = x[HWA-48] - _ = x[HYPERVISOR-49] - _ = x[IBPB-50] - _ = x[IBS-51] - _ = x[IBSBRNTRGT-52] - _ = x[IBSFETCHSAM-53] - _ = x[IBSFFV-54] - _ = x[IBSOPCNT-55] - _ = x[IBSOPCNTEXT-56] - _ = x[IBSOPSAM-57] - _ = x[IBSRDWROPCNT-58] - _ = x[IBSRIPINVALIDCHK-59] - _ = x[IBS_PREVENTHOST-60] - _ = x[INT_WBINVD-61] - _ = x[INVLPGB-62] - _ = x[LAHF-63] - _ = x[LZCNT-64] - _ = x[MCAOVERFLOW-65] - _ = x[MCOMMIT-66] - _ = x[MMX-67] - _ = x[MMXEXT-68] - _ = x[MOVBE-69] - _ = x[MOVDIR64B-70] - _ = x[MOVDIRI-71] - _ = x[MPX-72] - _ = x[MSR_PAGEFLUSH-73] - _ = x[MSRIRC-74] - _ = x[NX-75] - _ = x[OSXSAVE-76] - _ = x[PCONFIG-77] - _ = x[POPCNT-78] - _ = x[RDPRU-79] - _ = x[RDRAND-80] - _ = x[RDSEED-81] - _ = x[RDTSCP-82] - _ = x[RTM-83] - _ = x[RTM_ALWAYS_ABORT-84] - _ = x[SCE-85] - _ = x[SERIALIZE-86] - _ = x[SEV-87] - _ = x[SEV_64BIT-88] - _ = x[SEV_ALTERNATIVE-89] - _ = x[SEV_DEBUGSWAP-90] - _ = x[SEV_ES-91] - _ = x[SEV_RESTRICTED-92] - _ = x[SEV_SNP-93] - _ = x[SGX-94] - _ = x[SGXLC-95] - _ = x[SHA-96] - _ = x[SME-97] - _ = x[SME_COHERENT-98] - _ = x[SSE-99] - _ = x[SSE2-100] - _ = x[SSE3-101] - _ = x[SSE4-102] - _ = x[SSE42-103] - _ = x[SSE4A-104] - _ = x[SSSE3-105] - _ = x[STIBP-106] - _ = x[SUCCOR-107] - _ = x[TBM-108] - _ = x[TME-109] - _ = x[TSXLDTRK-110] - _ = x[VAES-111] - _ = x[VMPL-112] - _ = x[VMSA_REGPROT-113] - _ = x[VMX-114] - _ = x[VPCLMULQDQ-115] - _ = x[VTE-116] - _ = x[WAITPKG-117] - _ = x[WBNOINVD-118] - _ = x[X87-119] - _ = x[XGETBV1-120] - _ = x[XOP-121] - _ = x[XSAVE-122] - _ = x[XSAVEC-123] - _ = x[XSAVEOPT-124] - _ = x[XSAVES-125] - _ = x[AESARM-126] - _ = x[ARMCPUID-127] - _ = x[ASIMD-128] - _ = x[ASIMDDP-129] - _ = x[ASIMDHP-130] - _ = x[ASIMDRDM-131] - _ = x[ATOMICS-132] - _ = x[CRC32-133] - _ = x[DCPOP-134] - _ = x[EVTSTRM-135] - _ = x[FCMA-136] - _ = x[FP-137] - _ = x[FPHP-138] - _ = x[GPA-139] - _ = x[JSCVT-140] - _ = x[LRCPC-141] - _ = x[PMULL-142] - _ = x[SHA1-143] - _ = x[SHA2-144] - _ = x[SHA3-145] - _ = x[SHA512-146] - _ = x[SM3-147] - _ = x[SM4-148] - _ = x[SVE-149] - _ = x[lastID-150] + _ = x[AVXVNNI-27] + _ = x[BMI1-28] + _ = x[BMI2-29] + _ = x[CETIBT-30] + _ = x[CETSS-31] + _ = x[CLDEMOTE-32] + _ = x[CLMUL-33] + _ = x[CLZERO-34] + _ = x[CMOV-35] + _ = x[CMPSB_SCADBS_SHORT-36] + _ = x[CMPXCHG8-37] + _ = x[CPBOOST-38] + _ = x[CX16-39] + _ = x[ENQCMD-40] + _ = x[ERMS-41] + _ = x[F16C-42] + _ = x[FMA3-43] + _ = x[FMA4-44] + _ = x[FXSR-45] + _ = x[FXSROPT-46] + _ = x[GFNI-47] + _ = x[HLE-48] + _ = x[HRESET-49] + _ = x[HTT-50] + _ = x[HWA-51] + _ = x[HYPERVISOR-52] + _ = x[IBPB-53] + _ = x[IBS-54] + _ = x[IBSBRNTRGT-55] + _ = x[IBSFETCHSAM-56] + _ = x[IBSFFV-57] + _ = x[IBSOPCNT-58] + _ = x[IBSOPCNTEXT-59] + _ = x[IBSOPSAM-60] + _ = x[IBSRDWROPCNT-61] + _ = 
x[IBSRIPINVALIDCHK-62] + _ = x[IBS_PREVENTHOST-63] + _ = x[INT_WBINVD-64] + _ = x[INVLPGB-65] + _ = x[LAHF-66] + _ = x[LAM-67] + _ = x[LBRVIRT-68] + _ = x[LZCNT-69] + _ = x[MCAOVERFLOW-70] + _ = x[MCOMMIT-71] + _ = x[MMX-72] + _ = x[MMXEXT-73] + _ = x[MOVBE-74] + _ = x[MOVDIR64B-75] + _ = x[MOVDIRI-76] + _ = x[MOVSB_ZL-77] + _ = x[MPX-78] + _ = x[MSRIRC-79] + _ = x[MSR_PAGEFLUSH-80] + _ = x[NRIPS-81] + _ = x[NX-82] + _ = x[OSXSAVE-83] + _ = x[PCONFIG-84] + _ = x[POPCNT-85] + _ = x[RDPRU-86] + _ = x[RDRAND-87] + _ = x[RDSEED-88] + _ = x[RDTSCP-89] + _ = x[RTM-90] + _ = x[RTM_ALWAYS_ABORT-91] + _ = x[SCE-92] + _ = x[SERIALIZE-93] + _ = x[SEV-94] + _ = x[SEV_64BIT-95] + _ = x[SEV_ALTERNATIVE-96] + _ = x[SEV_DEBUGSWAP-97] + _ = x[SEV_ES-98] + _ = x[SEV_RESTRICTED-99] + _ = x[SEV_SNP-100] + _ = x[SGX-101] + _ = x[SGXLC-102] + _ = x[SHA-103] + _ = x[SME-104] + _ = x[SME_COHERENT-105] + _ = x[SSE-106] + _ = x[SSE2-107] + _ = x[SSE3-108] + _ = x[SSE4-109] + _ = x[SSE42-110] + _ = x[SSE4A-111] + _ = x[SSSE3-112] + _ = x[STIBP-113] + _ = x[STOSB_SHORT-114] + _ = x[SUCCOR-115] + _ = x[SVM-116] + _ = x[SVMDA-117] + _ = x[SVMFBASID-118] + _ = x[SVML-119] + _ = x[SVMNP-120] + _ = x[SVMPF-121] + _ = x[SVMPFT-122] + _ = x[TBM-123] + _ = x[TME-124] + _ = x[TSCRATEMSR-125] + _ = x[TSXLDTRK-126] + _ = x[VAES-127] + _ = x[VMCBCLEAN-128] + _ = x[VMPL-129] + _ = x[VMSA_REGPROT-130] + _ = x[VMX-131] + _ = x[VPCLMULQDQ-132] + _ = x[VTE-133] + _ = x[WAITPKG-134] + _ = x[WBNOINVD-135] + _ = x[X87-136] + _ = x[XGETBV1-137] + _ = x[XOP-138] + _ = x[XSAVE-139] + _ = x[XSAVEC-140] + _ = x[XSAVEOPT-141] + _ = x[XSAVES-142] + _ = x[AESARM-143] + _ = x[ARMCPUID-144] + _ = x[ASIMD-145] + _ = x[ASIMDDP-146] + _ = x[ASIMDHP-147] + _ = x[ASIMDRDM-148] + _ = x[ATOMICS-149] + _ = x[CRC32-150] + _ = x[DCPOP-151] + _ = x[EVTSTRM-152] + _ = x[FCMA-153] + _ = x[FP-154] + _ = x[FPHP-155] + _ = x[GPA-156] + _ = x[JSCVT-157] + _ = x[LRCPC-158] + _ = x[PMULL-159] + _ = x[SHA1-160] + _ = x[SHA2-161] + _ = x[SHA3-162] + _ = x[SHA512-163] + _ = x[SM3-164] + _ = x[SM4-165] + _ = x[SVE-166] + _ = x[lastID-167] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSR_PAGEFLUSHMSRIRCNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTMETSXLDTRKVAESVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWAVXVNNIBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHRESETHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_PREVENTHOSTINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTTBMTMETSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 451, 461, 468, 472, 477, 488, 495, 498, 504, 509, 518, 525, 528, 541, 547, 549, 556, 563, 569, 574, 580, 586, 592, 595, 611, 614, 623, 626, 635, 650, 663, 669, 683, 690, 693, 698, 701, 704, 716, 719, 723, 727, 731, 736, 741, 746, 751, 757, 760, 763, 771, 775, 779, 791, 794, 804, 807, 814, 822, 825, 832, 835, 840, 846, 854, 860, 866, 874, 879, 886, 893, 901, 908, 913, 918, 925, 929, 931, 935, 938, 943, 948, 953, 957, 961, 965, 971, 974, 977, 980, 986} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 237, 241, 245, 251, 256, 264, 269, 275, 279, 297, 305, 312, 316, 322, 326, 330, 334, 338, 342, 349, 353, 356, 362, 365, 368, 378, 382, 385, 395, 406, 412, 420, 431, 439, 451, 467, 482, 492, 499, 503, 506, 513, 518, 529, 536, 539, 545, 550, 559, 566, 574, 577, 583, 596, 601, 603, 610, 617, 623, 628, 634, 640, 646, 649, 665, 668, 677, 680, 689, 704, 717, 723, 737, 744, 747, 752, 755, 758, 770, 773, 777, 781, 785, 790, 795, 800, 805, 816, 822, 825, 830, 839, 843, 848, 853, 859, 862, 865, 875, 883, 887, 896, 900, 912, 915, 925, 928, 935, 943, 946, 953, 956, 961, 967, 975, 981, 987, 995, 1000, 1007, 1014, 1022, 1029, 1034, 1039, 1046, 1050, 1052, 1056, 1059, 1064, 1069, 1074, 1078, 1082, 1086, 1092, 1095, 1098, 1101, 1107} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go index 8d2cb0368bcf..d91d0210947e 100644 --- a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -2,18 +2,120 @@ package cpuid -import "runtime" +import ( + "runtime" + "strings" + + "golang.org/x/sys/unix" +) func detectOS(c *CPUInfo) bool { + if runtime.GOOS != "ios" { + tryToFillCPUInfoFomSysctl(c) + } // There are no hw.optional sysctl values for the below features on Mac OS 11.0 // to detect their supported state dynamically. 
Assume the CPU features that // Apple Silicon M1 supports to be available as a minimal set of features // to all Go programs running on darwin/arm64. // TODO: Add more if we know them. c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) - c.PhysicalCores = runtime.NumCPU() - // For now assuming 1 thread per core... - c.ThreadsPerCore = 1 - c.LogicalCores = c.PhysicalCores + return true } + +func sysctlGetBool(name string) bool { + value, err := unix.SysctlUint32(name) + if err != nil { + return false + } + return value != 0 +} + +func sysctlGetString(name string) string { + value, err := unix.Sysctl(name) + if err != nil { + return "" + } + return value +} + +func sysctlGetInt(unknown int, names ...string) int { + for _, name := range names { + value, err := unix.SysctlUint32(name) + if err != nil { + continue + } + if value != 0 { + return int(value) + } + } + return unknown +} + +func sysctlGetInt64(unknown int, names ...string) int { + for _, name := range names { + value64, err := unix.SysctlUint64(name) + if err != nil { + continue + } + if int(value64) != unknown { + return int(value64) + } + } + return unknown +} + +func setFeature(c *CPUInfo, name string, feature FeatureID) { + c.featureSet.setIf(sysctlGetBool(name), feature) +} +func tryToFillCPUInfoFomSysctl(c *CPUInfo) { + c.BrandName = sysctlGetString("machdep.cpu.brand_string") + + if len(c.BrandName) != 0 { + c.VendorString = strings.Fields(c.BrandName)[0] + } + + c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu") + c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") / + sysctlGetInt(1, "hw.physicalcpu") + c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count") + c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily") + c.Model = sysctlGetInt(0, "machdep.cpu.model") + c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") + c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") + c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") + + // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile + setFeature(c, "hw.optional.arm.FEAT_AES", AESARM) + setFeature(c, "hw.optional.AdvSIMD", ASIMD) + setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP) + setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM) + setFeature(c, "hw.optional.FEAT_CRC32", CRC32) + setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP) + // setFeature(c, "", EVTSTRM) + setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA) + setFeature(c, "hw.optional.arm.FEAT_FP", FP) + setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP) + setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA) + setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT) + setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC) + setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL) + setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1) + setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2) + setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3) + setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512) + // setFeature(c, "", SM3) + // setFeature(c, "", SM4) + setFeature(c, "hw.optional.arm.FEAT_SVE", SVE) + + // from empirical observation + setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP) + setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS) + setFeature(c, "hw.optional.floatingpoint", FP) + setFeature(c, "hw.optional.armv8_2_sha3", SHA3) + setFeature(c, "hw.optional.armv8_2_sha512", SHA512) + setFeature(c, 
"hw.optional.armv8_3_compnum", FCMA) + setFeature(c, "hw.optional.armv8_crc32", CRC32) +} diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go new file mode 100644 index 000000000000..200e596c6259 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/diff/diff.go @@ -0,0 +1,186 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package diff + +import ( + "bytes" + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + + chunks := DiffChunks(aLines, bLines) + + buf := new(bytes.Buffer) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. 
+ // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! 
+ if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/.gitignore b/vendor/github.com/kylelemons/godebug/pretty/.gitignore new file mode 100644 index 000000000000..fa9a735da3c1 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/.gitignore @@ -0,0 +1,5 @@ +*.test +*.bench +*.golden +*.txt +*.prof diff --git a/vendor/github.com/kylelemons/godebug/pretty/doc.go b/vendor/github.com/kylelemons/godebug/pretty/doc.go new file mode 100644 index 000000000000..03b5718a70db --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/doc.go @@ -0,0 +1,25 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pretty pretty-prints Go structures. +// +// This package uses reflection to examine a Go value and can +// print out in a nice, aligned fashion. It supports three +// modes (normal, compact, and extended) for advanced use. +// +// See the Reflect and Print examples for what the output looks like. +package pretty + +// TODO: +// - Catch cycles diff --git a/vendor/github.com/kylelemons/godebug/pretty/public.go b/vendor/github.com/kylelemons/godebug/pretty/public.go new file mode 100644 index 000000000000..fbc5d7abbf87 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/public.go @@ -0,0 +1,188 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
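(Editorial aside, not part of the vendored files above.) The godebug/diff package added in this hunk exposes only Diff and DiffChunks, both shown above. A minimal usage sketch assuming nothing beyond those two functions; the input strings are made up for illustration:

package main

import (
	"fmt"

	"github.com/kylelemons/godebug/diff"
)

func main() {
	before := "alpha\nbeta\ngamma"
	after := "alpha\nbeta\ndelta"

	// Diff splits its inputs on newlines and returns a unified diff in which
	// unchanged lines are prefixed with ' ', deletions with '-', additions with '+'.
	fmt.Println(diff.Diff(before, after))

	// DiffChunks exposes the same shortest-edit-script result as structured chunks.
	for _, c := range diff.DiffChunks([]string{"alpha", "beta", "gamma"}, []string{"alpha", "beta", "delta"}) {
		fmt.Printf("added=%q deleted=%q equal=%q\n", c.Added, c.Deleted, c.Equal)
	}
}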
+ +package pretty + +import ( + "bytes" + "fmt" + "io" + "net" + "reflect" + "time" + + "github.com/kylelemons/godebug/diff" +) + +// A Config represents optional configuration parameters for formatting. +// +// Some options, notably ShortList, dramatically increase the overhead +// of pretty-printing a value. +type Config struct { + // Verbosity options + Compact bool // One-line output. Overrides Diffable. + Diffable bool // Adds extra newlines for more easily diffable output. + + // Field and value options + IncludeUnexported bool // Include unexported fields in output + PrintStringers bool // Call String on a fmt.Stringer + PrintTextMarshalers bool // Call MarshalText on an encoding.TextMarshaler + SkipZeroFields bool // Skip struct fields that have a zero value. + + // Output transforms + ShortList int // Maximum character length for short lists if nonzero. + + // Type-specific overrides + // + // Formatter maps a type to a function that will provide a one-line string + // representation of the input value. Conceptually: + // Formatter[reflect.TypeOf(v)](v) = "v as a string" + // + // Note that the first argument need not explicitly match the type, it must + // merely be callable with it. + // + // When processing an input value, if its type exists as a key in Formatter: + // 1) If the value is nil, no stringification is performed. + // This allows overriding of PrintStringers and PrintTextMarshalers. + // 2) The value will be called with the input as its only argument. + // The function must return a string as its first return value. + // + // In addition to func literals, two common values for this will be: + // fmt.Sprint (function) func Sprint(...interface{}) string + // Type.String (method) func (Type) String() string + // + // Note that neither of these work if the String method is a pointer + // method and the input will be provided as a value. In that case, + // use a function that calls .String on the formal value parameter. + Formatter map[reflect.Type]interface{} + + // If TrackCycles is enabled, pretty will detect and track + // self-referential structures. If a self-referential structure (aka a + // "recursive" value) is detected, numbered placeholders will be emitted. + // + // Pointer tracking is disabled by default for performance reasons. + TrackCycles bool +} + +// Default Config objects +var ( + // DefaultFormatter is the default set of overrides for stringification. + DefaultFormatter = map[reflect.Type]interface{}{ + reflect.TypeOf(time.Time{}): fmt.Sprint, + reflect.TypeOf(net.IP{}): fmt.Sprint, + reflect.TypeOf((*error)(nil)).Elem(): fmt.Sprint, + } + + // CompareConfig is the default configuration used for Compare. + CompareConfig = &Config{ + Diffable: true, + IncludeUnexported: true, + Formatter: DefaultFormatter, + } + + // DefaultConfig is the default configuration used for all other top-level functions. + DefaultConfig = &Config{ + Formatter: DefaultFormatter, + } + + // CycleTracker is a convenience config for formatting and comparing recursive structures. 
+ CycleTracker = &Config{ + Diffable: true, + Formatter: DefaultFormatter, + TrackCycles: true, + } +) + +func (cfg *Config) fprint(buf *bytes.Buffer, vals ...interface{}) { + ref := &reflector{ + Config: cfg, + } + if cfg.TrackCycles { + ref.pointerTracker = new(pointerTracker) + } + for i, val := range vals { + if i > 0 { + buf.WriteByte('\n') + } + newFormatter(cfg, buf).write(ref.val2node(reflect.ValueOf(val))) + } +} + +// Print writes the DefaultConfig representation of the given values to standard output. +func Print(vals ...interface{}) { + DefaultConfig.Print(vals...) +} + +// Print writes the configured presentation of the given values to standard output. +func (cfg *Config) Print(vals ...interface{}) { + fmt.Println(cfg.Sprint(vals...)) +} + +// Sprint returns a string representation of the given value according to the DefaultConfig. +func Sprint(vals ...interface{}) string { + return DefaultConfig.Sprint(vals...) +} + +// Sprint returns a string representation of the given value according to cfg. +func (cfg *Config) Sprint(vals ...interface{}) string { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.String() +} + +// Fprint writes the representation of the given value to the writer according to the DefaultConfig. +func Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + return DefaultConfig.Fprint(w, vals...) +} + +// Fprint writes the representation of the given value to the writer according to the cfg. +func (cfg *Config) Fprint(w io.Writer, vals ...interface{}) (n int64, err error) { + buf := new(bytes.Buffer) + cfg.fprint(buf, vals...) + return buf.WriteTo(w) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in a and b, using the CompareConfig. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. +// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func Compare(a, b interface{}) string { + return CompareConfig.Compare(a, b) +} + +// Compare returns a string containing a line-by-line unified diff of the +// values in got and want according to the cfg. +// +// Each line in the output is prefixed with '+', '-', or ' ' to indicate which +// side it's from. Lines from the a side are marked with '-', lines from the +// b side are marked with '+' and lines that are the same on both sides are +// marked with ' '. +// +// The comparison is based on the intentionally-untyped output of Print, and as +// such this comparison is pretty forviving. In particular, if the types of or +// types within in a and b are different but have the same representation, +// Compare will not indicate any differences between them. +func (cfg *Config) Compare(a, b interface{}) string { + diffCfg := *cfg + diffCfg.Diffable = true + return diff.Diff(cfg.Sprint(a), cfg.Sprint(b)) +} diff --git a/vendor/github.com/kylelemons/godebug/pretty/reflect.go b/vendor/github.com/kylelemons/godebug/pretty/reflect.go new file mode 100644 index 000000000000..5cd30b7f0360 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/reflect.go @@ -0,0 +1,241 @@ +// Copyright 2013 Google Inc. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "encoding" + "fmt" + "reflect" + "sort" +) + +func isZeroVal(val reflect.Value) bool { + if !val.CanInterface() { + return false + } + z := reflect.Zero(val.Type()).Interface() + return reflect.DeepEqual(val.Interface(), z) +} + +// pointerTracker is a helper for tracking pointer chasing to detect cycles. +type pointerTracker struct { + addrs map[uintptr]int // addr[address] = seen count + + lastID int + ids map[uintptr]int // ids[address] = id +} + +// track tracks following a reference (pointer, slice, map, etc). Every call to +// track should be paired with a call to untrack. +func (p *pointerTracker) track(ptr uintptr) { + if p.addrs == nil { + p.addrs = make(map[uintptr]int) + } + p.addrs[ptr]++ +} + +// untrack registers that we have backtracked over the reference to the pointer. +func (p *pointerTracker) untrack(ptr uintptr) { + p.addrs[ptr]-- + if p.addrs[ptr] == 0 { + delete(p.addrs, ptr) + } +} + +// seen returns whether the pointer was previously seen along this path. +func (p *pointerTracker) seen(ptr uintptr) bool { + _, ok := p.addrs[ptr] + return ok +} + +// keep allocates an ID for the given address and returns it. +func (p *pointerTracker) keep(ptr uintptr) int { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + if _, ok := p.ids[ptr]; !ok { + p.lastID++ + p.ids[ptr] = p.lastID + } + return p.ids[ptr] +} + +// id returns the ID for the given address. +func (p *pointerTracker) id(ptr uintptr) (int, bool) { + if p.ids == nil { + p.ids = make(map[uintptr]int) + } + id, ok := p.ids[ptr] + return id, ok +} + +// reflector adds local state to the recursive reflection logic. +type reflector struct { + *Config + *pointerTracker +} + +// follow handles following a possiblly-recursive reference to the given value +// from the given ptr address. 
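(Editorial aside, not part of the vendored files.) public.go above defines the whole user-facing API of the pretty package: Print/Sprint/Fprint, Compare, and the Config struct. A small sketch of how they compose; the server struct and its field values are hypothetical test data:

package main

import (
	"fmt"

	"github.com/kylelemons/godebug/pretty"
)

type server struct {
	Host string
	Port int
	Tags []string
}

func main() {
	got := server{Host: "loki-read-1", Port: 3100, Tags: []string{"read"}}
	want := server{Host: "loki-read-1", Port: 3100, Tags: []string{"read", "write"}}

	// Compare renders both values with CompareConfig (Diffable output) and diffs
	// the renderings; an empty string means the two values rendered identically.
	if d := pretty.Compare(got, want); d != "" {
		fmt.Printf("values differ (-got +want):\n%s\n", d)
	}

	// Sprint uses DefaultConfig; a custom Config changes the rendering,
	// e.g. Compact for one-line output.
	compact := pretty.Config{Compact: true}
	fmt.Println(compact.Sprint(got))
}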
+func (r *reflector) follow(ptr uintptr, val reflect.Value) node { + if r.pointerTracker == nil { + // Tracking disabled + return r.val2node(val) + } + + // If a parent already followed this, emit a reference marker + if r.seen(ptr) { + id := r.keep(ptr) + return ref{id} + } + + // Track the pointer we're following while on this recursive branch + r.track(ptr) + defer r.untrack(ptr) + n := r.val2node(val) + + // If the recursion used this ptr, wrap it with a target marker + if id, ok := r.id(ptr); ok { + return target{id, n} + } + + // Otherwise, return the node unadulterated + return n +} + +func (r *reflector) val2node(val reflect.Value) node { + if !val.IsValid() { + return rawVal("nil") + } + + if val.CanInterface() { + v := val.Interface() + if formatter, ok := r.Formatter[val.Type()]; ok { + if formatter != nil { + res := reflect.ValueOf(formatter).Call([]reflect.Value{val}) + return rawVal(res[0].Interface().(string)) + } + } else { + if s, ok := v.(fmt.Stringer); ok && r.PrintStringers { + return stringVal(s.String()) + } + if t, ok := v.(encoding.TextMarshaler); ok && r.PrintTextMarshalers { + if raw, err := t.MarshalText(); err == nil { // if NOT an error + return stringVal(string(raw)) + } + } + } + } + + switch kind := val.Kind(); kind { + case reflect.Ptr: + if val.IsNil() { + return rawVal("nil") + } + return r.follow(val.Pointer(), val.Elem()) + case reflect.Interface: + if val.IsNil() { + return rawVal("nil") + } + return r.val2node(val.Elem()) + case reflect.String: + return stringVal(val.String()) + case reflect.Slice: + n := list{} + length := val.Len() + ptr := val.Pointer() + for i := 0; i < length; i++ { + n = append(n, r.follow(ptr, val.Index(i))) + } + return n + case reflect.Array: + n := list{} + length := val.Len() + for i := 0; i < length; i++ { + n = append(n, r.val2node(val.Index(i))) + } + return n + case reflect.Map: + // Extract the keys and sort them for stable iteration + keys := val.MapKeys() + pairs := make([]mapPair, 0, len(keys)) + for _, key := range keys { + pairs = append(pairs, mapPair{ + key: new(formatter).compactString(r.val2node(key)), // can't be cyclic + value: val.MapIndex(key), + }) + } + sort.Sort(byKey(pairs)) + + // Process the keys into the final representation + ptr, n := val.Pointer(), keyvals{} + for _, pair := range pairs { + n = append(n, keyval{ + key: pair.key, + val: r.follow(ptr, pair.value), + }) + } + return n + case reflect.Struct: + n := keyvals{} + typ := val.Type() + fields := typ.NumField() + for i := 0; i < fields; i++ { + sf := typ.Field(i) + if !r.IncludeUnexported && sf.PkgPath != "" { + continue + } + field := val.Field(i) + if r.SkipZeroFields && isZeroVal(field) { + continue + } + n = append(n, keyval{sf.Name, r.val2node(field)}) + } + return n + case reflect.Bool: + if val.Bool() { + return rawVal("true") + } + return rawVal("false") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rawVal(fmt.Sprintf("%d", val.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rawVal(fmt.Sprintf("%d", val.Uint())) + case reflect.Uintptr: + return rawVal(fmt.Sprintf("0x%X", val.Uint())) + case reflect.Float32, reflect.Float64: + return rawVal(fmt.Sprintf("%v", val.Float())) + case reflect.Complex64, reflect.Complex128: + return rawVal(fmt.Sprintf("%v", val.Complex())) + } + + // Fall back to the default %#v if we can + if val.CanInterface() { + return rawVal(fmt.Sprintf("%#v", val.Interface())) + } + + return rawVal(val.String()) +} + +type 
mapPair struct { + key string + value reflect.Value +} + +type byKey []mapPair + +func (v byKey) Len() int { return len(v) } +func (v byKey) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v byKey) Less(i, j int) bool { return v[i].key < v[j].key } diff --git a/vendor/github.com/kylelemons/godebug/pretty/structure.go b/vendor/github.com/kylelemons/godebug/pretty/structure.go new file mode 100644 index 000000000000..d876f60cad21 --- /dev/null +++ b/vendor/github.com/kylelemons/godebug/pretty/structure.go @@ -0,0 +1,223 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// a formatter stores stateful formatting information as well as being +// an io.Writer for simplicity. +type formatter struct { + *bufio.Writer + *Config + + // Self-referential structure tracking + tagNumbers map[int]int // tagNumbers[id] = <#n> +} + +// newFormatter creates a new buffered formatter. For the output to be written +// to the given writer, this must be accompanied by a call to write (or Flush). +func newFormatter(cfg *Config, w io.Writer) *formatter { + return &formatter{ + Writer: bufio.NewWriter(w), + Config: cfg, + tagNumbers: make(map[int]int), + } +} + +func (f *formatter) write(n node) { + defer f.Flush() + n.format(f, "") +} + +func (f *formatter) tagFor(id int) int { + if tag, ok := f.tagNumbers[id]; ok { + return tag + } + if f.tagNumbers == nil { + return 0 + } + tag := len(f.tagNumbers) + 1 + f.tagNumbers[id] = tag + return tag +} + +type node interface { + format(f *formatter, indent string) +} + +func (f *formatter) compactString(n node) string { + switch k := n.(type) { + case stringVal: + return string(k) + case rawVal: + return string(k) + } + + buf := new(bytes.Buffer) + f2 := newFormatter(&Config{Compact: true}, buf) + f2.tagNumbers = f.tagNumbers // reuse tagNumbers just in case + f2.write(n) + return buf.String() +} + +type stringVal string + +func (str stringVal) format(f *formatter, indent string) { + f.WriteString(strconv.Quote(string(str))) +} + +type rawVal string + +func (r rawVal) format(f *formatter, indent string) { + f.WriteString(string(r)) +} + +type keyval struct { + key string + val node +} + +type keyvals []keyval + +func (l keyvals) format(f *formatter, indent string) { + f.WriteByte('{') + + switch { + case f.Compact: + // All on one line: + for i, kv := range l { + if i > 0 { + f.WriteByte(',') + } + f.WriteString(kv.key) + f.WriteByte(':') + kv.val.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, kv := range l { + f.WriteString(inner) + f.WriteString(kv.key) + f.WriteString(": ") + kv.val.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + keyWidth := 0 + for _, kv := range l { + if kw := len(kv.key); kw > keyWidth { + keyWidth = kw + } + } + alignKey := indent + " " + alignValue := strings.Repeat(" ", 
keyWidth) + inner := alignKey + alignValue + " " + // First and last line shared with bracket: + for i, kv := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(alignKey) + } + f.WriteString(kv.key) + f.WriteString(": ") + f.WriteString(alignValue[len(kv.key):]) + kv.val.format(f, inner) + } + } + + f.WriteByte('}') +} + +type list []node + +func (l list) format(f *formatter, indent string) { + if max := f.ShortList; max > 0 { + short := f.compactString(l) + if len(short) <= max { + f.WriteString(short) + return + } + } + + f.WriteByte('[') + + switch { + case f.Compact: + // All on one line: + for i, v := range l { + if i > 0 { + f.WriteByte(',') + } + v.format(f, indent) + } + case f.Diffable: + f.WriteByte('\n') + inner := indent + " " + // Each value gets its own line: + for _, v := range l { + f.WriteString(inner) + v.format(f, inner) + f.WriteString(",\n") + } + f.WriteString(indent) + default: + inner := indent + " " + // First and last line shared with bracket: + for i, v := range l { + if i > 0 { + f.WriteString(",\n") + f.WriteString(inner) + } + v.format(f, inner) + } + } + + f.WriteByte(']') +} + +type ref struct { + id int +} + +func (r ref) format(f *formatter, indent string) { + fmt.Fprintf(f, "", f.tagFor(r.id)) +} + +type target struct { + id int + value node +} + +func (t target) format(f *formatter, indent string) { + tag := fmt.Sprintf("<#%d> ", f.tagFor(t.id)) + switch { + case f.Diffable, f.Compact: + // no indent changes + default: + indent += strings.Repeat(" ", len(tag)) + } + f.WriteString(tag) + t.value.format(f, indent) +} diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore index 8081bd0ffbf6..12ecd6ae9b7e 100644 --- a/vendor/github.com/minio/minio-go/v7/.gitignore +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -1,4 +1,5 @@ *~ *.test validator -golangci-lint \ No newline at end of file +golangci-lint +functional_tests \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml index dfc0c2d53773..875b949c6dd1 100644 --- a/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -12,8 +12,7 @@ linters: - govet - ineffassign - gosimple - - deadcode - - structcheck + - unused - gocritic issues: @@ -25,3 +24,4 @@ issues: - "captLocal:" - "ifElseChain:" - "elseif:" + - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile index ac4a328f0e3c..ace666705427 100644 --- a/vendor/github.com/minio/minio-go/v7/Makefile +++ b/vendor/github.com/minio/minio-go/v7/Makefile @@ -9,7 +9,7 @@ checks: lint vet test examples functional-test lint: @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.45.2 + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin @echo "Running $@ check" @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml @@ -27,7 +27,8 @@ examples: @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio 
SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests clean: @echo "Cleaning up all the generated files" diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md index 4e3abf71d2d5..9b6bbbec342c 100644 --- a/vendor/github.com/minio/minio-go/v7/README.md +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -2,7 +2,7 @@ The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). +This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). This document assumes that you have a working [Go development environment](https://golang.org/doc/install). @@ -126,53 +126,53 @@ mc ls play/mymusic/ ## API Reference The full API Reference is available here. -* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* 
[`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension) ### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject) ### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* 
[`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API Reference : Client custom settings -* [`SetAppInfo`](https://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](https://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](https://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## Full Examples @@ -236,8 +236,8 @@ The full API Reference is available here. * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) +* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html) ## Contribute [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md index 64e79341196a..97087c61e0c0 100644 --- a/vendor/github.com/minio/minio-go/v7/README_zh_CN.md +++ b/vendor/github.com/minio/minio-go/v7/README_zh_CN.md @@ -14,7 +14,7 @@ MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对 - Ceph Object Gateway - Riak CS -本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 +本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html)。 本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 @@ -140,52 +140,52 @@ mc ls play/mymusic/ ## API文档 完整的API文档在这里。 -* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整API文档](https://min.io/docs/minio/linux/developers/go/API.html) ### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) +* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket) +* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets) +* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists) +* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket) +* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects) +* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads) ### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* 
[`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy) +* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy) ### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展) +* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification) +* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO 扩展) +* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO 扩展) ### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) +* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject) ### API文档 : 操作对象 -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) +* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject) +* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject) +* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming) +* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject) +* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject) +* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject) +* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects) +* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent) ### API文档 : Presigned操作 -* 
[`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) +* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject) +* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject) +* [`PresignedHeadObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedHeadObject) +* [`PresignedPostPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPostPolicy) ### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) +* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo) +* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn) +* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff) ## 完整示例 @@ -253,8 +253,8 @@ mc ls play/mymusic/ * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) ## 了解更多 -* [完整文档](https://docs.min.io) -* [MinIO Go Client SDK API文档](https://docs.min.io/docs/golang-client-api-reference) +* [完整文档](https://min.io/docs/minio/kubernetes/upstream/index.html) +* [MinIO Go Client SDK API文档](https://min.io/docs/minio/linux/developers/go/API.html) ## 贡献 [贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index b59924a3d514..835c8bd8ad51 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -221,7 +221,7 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) } if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if !dstOpts.Internal.LegalholdTimestamp.IsZero() { headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 09b03af26cce..1f4793877725 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -45,19 +45,20 @@ type StringMap map[string]string // on the first line is initialize it. func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` + type Item struct { + Key string + Value string } for { - var e xmlMapEntry + var e Item err := d.Decode(&e) if err == io.EOF { break - } else if err != nil { + } + if err != nil { return err } - (*m)[e.XMLName.Local] = e.Value + (*m)[e.Key] = e.Value } return nil } @@ -84,6 +85,14 @@ type UploadInfo struct { // not to be confused with `Expires` HTTP header. 
Expiration time.Time ExpirationRuleID string + + // Verified checksum values, if any. + // Values are base64 (standard) encoded. + // For multipart objects this is a checksum of the checksum of each part. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string } // RestoreInfo contains information of the restore operation of an archived object @@ -112,7 +121,7 @@ type ObjectInfo struct { Metadata http.Header `json:"metadata" xml:"-"` // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` + UserMetadata StringMap `json:"userMetadata,omitempty"` // x-amz-tagging values in their k/v values. UserTags map[string]string `json:"userTags"` @@ -140,7 +149,8 @@ type ObjectInfo struct { // - FAILED // - REPLICA (on the destination) ReplicationStatus string `xml:"ReplicationStatus"` - + // set to true if delete marker has backing object version on target, and eligible to replicate + ReplicationReady bool // Lifecycle expiry-date and ruleID associated with the expiry // not to be confused with `Expires` HTTP header. Expiration time.Time @@ -148,6 +158,12 @@ type ObjectInfo struct { Restore *RestoreInfo + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + // Error Err error `json:"-"` } diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go index dd781cae33b1..33ca394588ea 100644 --- a/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go @@ -67,14 +67,14 @@ type ErrorResponse struct { // // For example: // -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... +// import s3 "github.com/minio/minio-go/v7" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... func ToErrorResponse(err error) ErrorResponse { switch err := err.(type) { case ErrorResponse: diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index 8706caaac4bf..b17f3146b8b1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -45,9 +45,7 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o // Detect if snowball is server location we are talking to. var snowball bool if location, ok := c.bucketLocCache.Get(bucketName); ok { - if location == "snowball" { - snowball = true - } + snowball = location == "snowball" } var ( @@ -64,165 +62,46 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o // This routine feeds partial object data as and when the caller reads. go func() { - defer close(reqCh) defer close(resCh) + defer func() { + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + }() defer cancel() // Used to verify if etag of object has changed since last read. var etag string - // Loop through the incoming control messages and read data. - for { - select { - // When context is closed exit our routine. - case <-gctx.Done(): - // Close the http response body before returning. - // This ends the connection with the server. 
- if httpReader != nil { - httpReader.Close() - } - return - - // Gather incoming request. - case req, ok := <-reqCh: - if !ok { - return - } - // If this is the first request we may not need to do a getObject request yet. - if req.isFirstReq { - // First request is a Read/ReadAt. - if req.isReadOp { - // Differentiate between wanting the whole object and just a range. - if req.isReadAt { - // If this is a ReadAt request only get the specified range. - // Range is set with respect to the offset and length of the buffer requested. - // Do not set objectInfo from the first readAt request because it will not get - // the whole object. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{Error: err} - return - } - etag = objectInfo.ETag - // Read at least firstReq.Buffer bytes, if not we have - // reached our EOF. - size, err := readFull(httpReader, req.Buffer) - totalRead += size - if size > 0 && err == io.ErrUnexpectedEOF { - if int64(size) < objectInfo.Size { - // In situations when returned size - // is less than the expected content - // length set by the server, make sure - // we return io.ErrUnexpectedEOF - err = io.ErrUnexpectedEOF - } else { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { - // Special cases when server writes more data - // than the content-length, net/http response - // body returns an error, instead of converting - // it to io.EOF - return unexpected EOF. - err = io.ErrUnexpectedEOF - } - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - Size: size, - Error: err, - didRead: true, - } - } else { - // First request is a Stat or Seek call. - // Only need to run a StatObject until an actual Read or ReadAt request comes through. - - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the go-routine. - return - } - etag = objectInfo.ETag - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - } + for req := range reqCh { + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) } - } else if req.settingObjectInfo { // Request is just to get objectInfo. - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. 
- if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the goroutine. + resCh <- getResponse{Error: err} return } - // Send back the objectInfo. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } else { - // Offset changes fetch the new object at an Offset. - // Because the httpReader may not be set by the first - // request if it was a stat or seek it must be checked - // if the object has been read or not to only initialize - // new ones when they haven't been already. - // All readAt requests are new requests. - if req.DidOffsetChange || !req.beenRead { - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. - if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - if httpReader != nil { - // Close previously opened http reader. - httpReader.Close() - } - // If this request is a readAt only get the specified range. - if req.isReadAt { - // Range is set with respect to the offset and length of the buffer requested. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { // Range is set with respect to the offset. - opts.SetRange(req.Offset, 0) - } else { - // Remove range header if already set - delete(opts.headers, "Range") - } - httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{ - Error: err, - } - return - } - totalRead = 0 - } - - // Read at least req.Buffer bytes, if not we have + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have // reached our EOF. size, err := readFull(httpReader, req.Buffer) totalRead += size if size > 0 && err == io.ErrUnexpectedEOF { - if int64(totalRead) < objectInfo.Size { + if int64(size) < objectInfo.Size { // In situations when returned size // is less than the expected content // length set by the server, make sure @@ -240,15 +119,123 @@ func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, o // it to io.EOF - return unexpected EOF. err = io.ErrUnexpectedEOF } - - // Reply back how much was read. + // Send back the first response. resCh <- getResponse{ + objectInfo: objectInfo, Size: size, Error: err, didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. + resCh <- getResponse{ objectInfo: objectInfo, } } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. 
+ return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } else { + // Remove range header if already set + delete(opts.headers, "Range") + } + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + totalRead = 0 + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(totalRead) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + + // Reply back how much was read. + resCh <- getResponse{ + Size: size, + Error: err, + didRead: true, + objectInfo: objectInfo, + } } } }() @@ -615,6 +602,7 @@ func (o *Object) Close() (err error) { if o == nil { return errInvalidArgument("Object is nil") } + // Locking. o.mutex.Lock() defer o.mutex.Unlock() @@ -627,6 +615,9 @@ func (o *Object) Close() (err error) { // Close successfully. o.cancel() + // Close the request channel to indicate the internal go-routine to exit. + close(o.reqCh) + // Save for future operations. errMsg := "Object is already closed. Bad file descriptor." o.prevErr = errors.New(errMsg) diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go index 184ef9f86817..0082c1fa7623 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go @@ -27,8 +27,9 @@ import ( // AdvancedGetOptions for internal use by MinIO server - not intended for client use. 
type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - ReplicationProxyRequest string + ReplicationDeleteMarker bool + IsReplicationReadyForDeleteMarker bool + ReplicationProxyRequest string } // GetObjectOptions are used to specify additional headers or options @@ -38,6 +39,12 @@ type GetObjectOptions struct { ServerSideEncryption encrypt.ServerSide VersionID string PartNumber int + + // Include any checksums, if object was uploaded with checksum. + // For multipart objects this is a checksum of part checksums. + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + Checksum bool + // To be not used by external applications Internal AdvancedGetOptions } @@ -60,6 +67,9 @@ func (o GetObjectOptions) Header() http.Header { if o.Internal.ReplicationProxyRequest != "" { headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) } + if o.Checksum { + headers.Set("x-amz-checksum-mode", "ENABLED") + } return headers } diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go index b624b07786e0..d216afb39727 100644 --- a/vendor/github.com/minio/minio-go/v7/api-list.go +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -32,11 +32,10 @@ import ( // This call requires explicit authentication, no anonymous requests are // allowed for listing buckets. // -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { // Execute GET on service. resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) @@ -71,21 +70,28 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List // Return object owner information by default fetchOwner := true + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -99,9 +105,9 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -138,6 +144,14 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List if !result.IsTruncated { return } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + sendObjectInfo(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), + }) + return + } } }(objectStatCh) return objectStatCh @@ -263,20 +277,28 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // If recursive we do not delimit. 
delimiter = "" } + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(objectStatCh) - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return objectStatCh } @@ -289,9 +311,9 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb // Get list of objects a maximum of 1000 per request. result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - objectStatCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -344,21 +366,28 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts delimiter = "" } + sendObjectInfo := func(info ObjectInfo) { + select { + case resultCh <- info: + case <-ctx.Done(): + } + } + // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } // Validate incoming object prefix. if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { defer close(resultCh) - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return resultCh } @@ -375,9 +404,9 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts // Get list of objects a maximum of 1000 per request. result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) if err != nil { - resultCh <- ObjectInfo{ + sendObjectInfo(ObjectInfo{ Err: err, - } + }) return } @@ -659,11 +688,10 @@ func (o *ListObjectsOptions) Set(key, value string) { // ListObjects returns objects list after evaluating the passed options. // -// api := client.New(....) -// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// +// api := client.New(....) +// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { +// fmt.Println(object) +// } func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { if opts.WithVersions { return c.listObjectVersions(ctx, bucketName, opts) @@ -694,12 +722,12 @@ func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListOb // If you enable recursive as 'true' this function will return back all // the multipart objects in a given bucket name. // -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } +// api := client.New(....) 
+// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { +// fmt.Println(message) +// } func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) } @@ -916,7 +944,7 @@ func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName strin } // listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded +// - lists some or all (up to 1000) parts that have been uploaded // for a specific multipart upload // // You can use the request parameters as selection criteria to return diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go index 149a536e42f0..7fad7600c3d9 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -65,10 +65,9 @@ func isReadAt(reader io.Reader) (ok bool) { // NOTE: Assumption here is that for any object to be uploaded to any S3 compatible // object storage it will have the following parameters as constants. // -// maxPartsCount - 10000 -// minPartSize - 16MiB -// maxMultipartPutObjectSize - 5TiB -// +// maxPartsCount - 10000 +// minPartSize - 16MiB +// maxMultipartPutObjectSize - 5TiB func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { // object size is '-1' set it to 5TiB. var unknownSize bool diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index abcbd298198a..3c9a13ff20b3 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -24,6 +24,7 @@ import ( "encoding/hex" "encoding/xml" "fmt" + "hash/crc32" "io" "io/ioutil" "net/http" @@ -79,11 +80,23 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj return UploadInfo{}, err } + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. + hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) + if len(hashSums) == 0 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } + // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { @@ -100,12 +113,12 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // Create a buffer. buf := make([]byte, partSize) + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + customHeader := make(http.Header) + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) for partNumber <= totalPartsCount { - // Choose hash algorithms to be calculated by hashCopyN, - // avoid sha256 with non-v4 signature request or - // HTTPS connection. 
- hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) - length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { break @@ -131,18 +144,24 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj md5Base64 string sha256Hex string ) + if hashSums["md5"] != nil { md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) } if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } + if len(hashSums) == 0 { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) + } + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), - opts.ServerSideEncryption, - !opts.DisableContentSha256) + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -171,15 +190,25 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + opts = PutObjectOptions{} + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } @@ -241,57 +270,73 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object return initiateMultipartUploadResult, nil } +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + // uploadPart - Uploads a part in a multipart upload. -func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide, streamSha256 bool, -) (ObjectPart, error) { +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } - if partNumber <= 0 { + if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } - if uploadID == "" { + if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. - urlValues.Set("uploadId", uploadID) + urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. - customHeader := make(http.Header) + if p.customHeader == nil { + p.customHeader = make(http.Header) + } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. - if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, + bucketName: p.bucketName, + objectName: p.objectName, queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - streamSha256: streamSha256, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, } // Execute PUT on each part. @@ -302,15 +347,21 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI } if resp != nil { if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. - objPart := ObjectPart{} - objPart.Size = size - objPart.PartNumber = partNumber + h := resp.Header + objPart := ObjectPart{ + ChecksumCRC32: h.Get("x-amz-checksum-crc32"), + ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), + ChecksumSHA1: h.Get("x-amz-checksum-sha1"), + ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + } + objPart.Size = p.size + objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. 
- objPart.ETag = trimEtag(resp.Header.Get("ETag")) + objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil } @@ -397,5 +448,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, + + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 11b3a5255fbf..e3a14c59d8e8 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -22,6 +22,7 @@ import ( "context" "encoding/base64" "fmt" + "hash/crc32" "io" "net/http" "net/url" @@ -38,9 +39,8 @@ import ( // // Following code handles these types of readers. // -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -// +// - *minio.Object +// - Any reader which has a method 'ReadAt()' func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { @@ -107,11 +107,19 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN return UploadInfo{}, err } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload in progress, if the // function returns any error, since we do not resume @@ -177,19 +185,33 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // As a special case if partNumber is lastPartNumber, we // calculate the offset based on the last part size. if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) + readOffset = size - lastPartSize partSize = lastPartSize } sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) + var trailer = make(http.Header, 1) + if withChecksum { + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { + trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + } // Proceed to upload the part. 
- objPart, err := c.uploadPart(ctx, bucketName, objectName, - uploadID, sectionReader, uploadReq.PartNum, - "", "", partSize, - opts.ServerSideEncryption, - !opts.DisableContentSha256, - ) + p := uploadPartParams{bucketName: bucketName, + objectName: objectName, + uploadID: uploadID, + reader: sectionReader, + partNumber: uploadReq.PartNum, + size: partSize, + sse: opts.ServerSideEncryption, + streamSha256: !opts.DisableContentSha256, + sha256Hex: "", + trailer: trailer, + } + objPart, err := c.uploadPart(ctx, p) if err != nil { uploadedPartsCh <- uploadedPartRes{ Error: err, @@ -226,8 +248,12 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, }) } } @@ -240,6 +266,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) + if withChecksum { + // Add hash of hashes. + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for _, part := range complMultipartUpload.Parts { + cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + if err == nil { + crc.Write(cs) + } + } + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) if err != nil { return UploadInfo{}, err @@ -260,6 +298,13 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b return UploadInfo{}, err } + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } + // Calculate the optimal parts info for a given size. totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) if err != nil { @@ -270,6 +315,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") // Aborts the multipart upload if the function returns // any error, since we do not resume we should purge @@ -281,6 +327,14 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b } }() + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + customHeader := make(http.Header) + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + md5Hash := c.md5Hasher() + defer md5Hash.Close() + // Total data read and written to server. should be equal to 'size' at the end of the call. var totalUploadedSize int64 @@ -292,7 +346,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Avoid declaring variables in the for loop var md5Base64 string - var hookReader io.Reader // Part number always starts with '1'. 
var partNumber int @@ -303,37 +356,34 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b partSize = lastPartSize } - if opts.SendContentMd5 { - length, rerr := readFull(reader, buf) - if rerr == io.EOF && partNumber > 1 { - break - } - - if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { - return UploadInfo{}, rerr - } + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - hash.Close() + if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, rerr + } - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress) + // Calculate md5sum. + if opts.SendContentMd5 { + md5Hash.Reset() + md5Hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)) } else { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader = newHook(reader, opts.Progress) + // Add CRC32C instead. + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) } - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, - io.LimitReader(hookReader, partSize), - partNumber, md5Base64, "", partSize, - opts.ServerSideEncryption, - !opts.DisableContentSha256, - ) + // Update progress reader appropriately to the latest offset + // as we read from the source. + hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -363,15 +413,26 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + opts = PutObjectOptions{} + if len(crcBytes) > 0 { + // Add hash of hashes. 
+ crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } @@ -401,6 +462,7 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + var readSeeker io.Seeker if size > 0 { if isReadAt(reader) && !isObject(reader) { seeker, ok := reader.(io.Seeker) @@ -410,35 +472,49 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errInvalidArgument(err.Error()) } reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) } } } var md5Base64 string if opts.SendContentMd5 { - // Create a buffer. - buf := make([]byte, size) + // Calculate md5sum. + hash := c.md5Hasher() - length, rErr := readFull(reader, buf) - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } + + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) } - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - reader = bytes.NewReader(buf[:length]) hash.Close() } // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, opts.Progress) + progressReader := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) } // putObjectDo - executes the put object http operation. 
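For reference, the "hash of hashes" that the multipart upload paths above attach as X-Amz-Checksum-Crc32c can be reproduced in isolation: each part's raw 4-byte CRC32C sum is appended to crcBytes, and the object-level checksum is the CRC32C of that concatenation, base64-encoded. The sketch below is a standalone illustration of that aggregation, not part of the vendored library; the sample part payloads are made up.

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

// compositeCRC32C mirrors the aggregation above: CRC32C each part, append the
// raw 4-byte sums, then CRC32C the concatenation and base64-encode the result.
func compositeCRC32C(parts [][]byte) string {
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	var crcBytes []byte
	for _, part := range parts {
		crc.Reset()
		crc.Write(part)
		crcBytes = append(crcBytes, crc.Sum(nil)...) // raw per-part checksum
	}

	// Checksum of the concatenated part checksums ("hash of hashes").
	crc.Reset()
	crc.Write(crcBytes)
	return base64.StdEncoding.EncodeToString(crc.Sum(nil))
}

func main() {
	// Hypothetical part payloads, for illustration only.
	parts := [][]byte{[]byte("part one"), []byte("part two")}
	fmt.Println("X-Amz-Checksum-Crc32c:", compositeCRC32C(parts))
}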
@@ -490,14 +566,20 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, // extract lifecycle expiry date and rule ID expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - + h := resp.Header return UploadInfo{ Bucket: bucketName, Key: objectName, - ETag: trimEtag(resp.Header.Get("ETag")), - VersionID: resp.Header.Get(amzVersionID), + ETag: trimEtag(h.Get("ETag")), + VersionID: h.Get(amzVersionID), Size: size, Expiration: expTime, ExpirationRuleID: ruleID, + + // Checksum values + ChecksumCRC32: h.Get("x-amz-checksum-crc32"), + ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), + ChecksumSHA1: h.Get("x-amz-checksum-sha1"), + ChecksumSHA256: h.Get("x-amz-checksum-sha256"), }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 9328fb6c1026..5735bee5eb2a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -23,6 +23,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "net/http" "sort" @@ -158,7 +159,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) } if opts.Internal.ReplicationRequest { - header.Set(minIOBucketReplicationRequest, "") + header.Set(minIOBucketReplicationRequest, "true") } if !opts.Internal.LegalholdTimestamp.IsZero() { header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) @@ -215,18 +216,18 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // // You must have WRITE permissions on a bucket to create an object. // -// - For size smaller than 16MiB PutObject automatically does a -// single atomic PUT operation. +// - For size smaller than 16MiB PutObject automatically does a +// single atomic PUT operation. // -// - For size larger than 16MiB PutObject automatically does a -// multipart upload operation. +// - For size larger than 16MiB PutObject automatically does a +// multipart upload operation. // -// - For size input as -1 PutObject does a multipart Put operation -// until input stream reaches EOF. Maximum object size that can -// be uploaded through this operation will be 5TiB. +// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. // -// WARNING: Passing down '-1' will use memory and these cannot -// be reused for best outcomes for PutObject(), pass the size always. +// WARNING: Passing down '-1' will use memory and these cannot +// be reused for best outcomes for PutObject(), pass the size always. // // NOTE: Upon errors during upload multipart operation is entirely aborted. 
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, @@ -268,6 +269,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str } if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } @@ -299,11 +303,20 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam if err != nil { return UploadInfo{}, err } + + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + } + // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { @@ -320,6 +333,12 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam // Create a buffer. buf := make([]byte, partSize) + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + customHeader := make(http.Header) + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + for partNumber <= totalPartsCount { length, rerr := readFull(reader, buf) if rerr == io.EOF && partNumber > 1 { @@ -337,6 +356,12 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam hash.Write(buf[:length]) md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) hash.Close() + } else { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) } // Update progress reader appropriately to the latest offset @@ -344,11 +369,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, "", int64(length), - opts.ServerSideEncryption, - !opts.DisableContentSha256, - ) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } @@ -377,15 +399,26 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) + opts = PutObjectOptions{} + if len(crcBytes) > 0 { + // Add hash of hashes. 
+ crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go index 0fee90226b68..c0ff31a5bd28 100644 --- a/vendor/github.com/minio/minio-go/v7/api-remove.go +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -82,8 +82,8 @@ func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, // RemoveBucket deletes the bucket name. // -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -166,7 +166,7 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) } if opts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") + headers.Set(minIOBucketReplicationRequest, "true") } if opts.ForceDelete { headers.Set(minIOForceDelete, "true") diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index 592d4cdccb12..827395ee1253 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -261,6 +261,12 @@ type ObjectPart struct { // Size of the uploaded part data. Size int64 + + // Checksum values of each part. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string } // ListObjectPartsResult container for ListObjectParts response. @@ -299,16 +305,26 @@ type completeMultipartUploadResult struct { Bucket string Key string ETag string + + // Checksum values, hash of hashes of parts. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string } // CompletePart sub container lists individual part numbers and their // md5sum, part of completeMultipartUpload. type CompletePart struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` - // Part number identifies the part. PartNumber int ETag string + + // Checksum values + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` } // completeMultipartUpload container for completing multipart upload. 
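From an application's point of view, the checksum plumbing above surfaces in the UploadInfo returned by PutObject, in the ObjectInfo returned by Stat, and is requested on reads through the new GetObjectOptions.Checksum flag. The following is a minimal usage sketch only, assuming a reachable S3-compatible endpoint; the endpoint, credentials, bucket, and object names are placeholders, and the checksum fields may be empty when the backend does not report them.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Endpoint and credentials are placeholders.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()
	data := []byte("hello checksums")

	// Upload; UploadInfo now carries any checksum values the server reported
	// (ChecksumCRC32, ChecksumCRC32C, ChecksumSHA1, ChecksumSHA256).
	info, err := client.PutObject(ctx, "mybucket", "myobject",
		bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded, CRC32C:", info.ChecksumCRC32C)

	// Ask the server to include checksums on reads via the Checksum option.
	obj, err := client.GetObject(ctx, "mybucket", "myobject",
		minio.GetObjectOptions{Checksum: true})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	st, err := obj.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("stat CRC32C:", st.ChecksumCRC32C)
}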
diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index 6deb5f5dc946..418d6cb2547b 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -70,6 +70,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if opts.Internal.ReplicationDeleteMarker { headers.Set(minIOBucketReplicationDeleteMarker, "true") } + if opts.Internal.IsReplicationReadyForDeleteMarker { + headers.Set(isMinioTgtReplicationReady, "true") + } urlValues := make(url.Values) if opts.VersionID != "" { @@ -90,6 +93,7 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, if resp != nil { deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { errResp := ErrorResponse{ @@ -105,8 +109,9 @@ func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, }, errResp } return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationReady: replicationReady, // whether delete marker can be replicated }, httpRespToErrorResponse(resp, bucketName, objectName) } } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 31a9a078d4c5..a93a6c6207a5 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -20,8 +20,10 @@ package minio import ( "bytes" "context" + "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "math/rand" @@ -93,6 +95,8 @@ type Client struct { sha256Hasher func() md5simd.Hasher healthStatus int32 + + trailingHeaderSupport bool } // Options for New method @@ -103,6 +107,10 @@ type Options struct { Region string BucketLookup BucketLookupType + // TrailingHeaders indicates server support of trailing headers. + // Only supported for v4 signatures. + TrailingHeaders bool + // Custom hash routines. Leave nil to use standard. CustomMD5 func() md5simd.Hasher CustomSHA256 func() md5simd.Hasher @@ -111,13 +119,13 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.32" + libraryVersion = "v7.0.45" ) // User Agent should always following the below style. // Please open an issue to discuss any new changes here. // -// MinIO (OS; ARCH) LIB/VER APP/VER +// MinIO (OS; ARCH) LIB/VER APP/VER const ( libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion @@ -246,6 +254,9 @@ func privateNew(endpoint string, opts *Options) (*Client, error) { if clnt.sha256Hasher == nil { clnt.sha256Hasher = newSHA256Hasher } + + clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4() + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. 
clnt.lookup = opts.BucketLookup @@ -312,9 +323,9 @@ func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { // Hash materials provides relevant initialized hash algo writers // based on the expected signature type. // -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { hashSums = make(map[string][]byte) hashAlgos = make(map[string]md5simd.Hasher) @@ -384,13 +395,11 @@ func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, erro gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) _, err := c.getBucketLocation(gctx, probeBucketName) gcancel() - if IsNetworkOrHostDown(err, false) { - // Still network errors do not need to do anything. - continue - } - switch ToErrorResponse(err).Code { - case "NoSuchBucket", "AccessDenied", "": - atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + if !IsNetworkOrHostDown(err, false) { + switch ToErrorResponse(err).Code { + case "NoSuchBucket", "AccessDenied", "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } } } @@ -421,6 +430,8 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool + addCrc bool + trailer http.Header // (http.Request).Trailer. Requires v4 signature. } // dumpHTTP - dump HTTP request and response. @@ -583,6 +594,17 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } + if metadata.addCrc { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } // Instantiate a new request. var req *http.Request req, err = c.newRequest(ctx, method, metadata) @@ -594,15 +616,15 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, err } + // Initiate the request. res, err = c.do(req) if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil, err + if isRequestErrorRetryable(err) { + // Retry the request + continue } - - // Retry the request - continue + return nil, err } // For any known successful http status, return quickly. @@ -635,7 +657,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // code dictates invalid region, we can retry the request // with the new region. // - // Additionally we should only retry if bucketLocation and custom + // Additionally, we should only retry if bucketLocation and custom // region is empty. if c.region == "" { switch errResponse.Code { @@ -817,9 +839,12 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request // Add signature version '2' authorization header. 
req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) case metadata.streamSha256 && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. req = signer.StreamingSignV4(req, accessKeyID, secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) default: @@ -827,11 +852,17 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request shaHeader := unsignedPayload if metadata.contentSHA256Hex != "" { shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer } req.Header.Set("X-Amz-Content-Sha256", shaHeader) // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) } // Return request. diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go index dee83b870cc5..1c3e8e6f6461 100644 --- a/vendor/github.com/minio/minio-go/v7/constants.go +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -46,6 +46,10 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + // Total number of parallel workers used for multipart operation. const totalWorkers = 4 @@ -96,6 +100,9 @@ const ( minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" // Header indicates last legalhold update time on source minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" - - minIOForceDelete = "x-minio-force-delete" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" ) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go index 148671eec542..207a38702071 100644 --- a/vendor/github.com/minio/minio-go/v7/core.go +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -88,8 +88,19 @@ func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarke // PutObjectPart - Upload an object part. 
func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - streamSha256 := true - return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse, streamSha256) + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: md5Base64, + sha256Hex: sha256Hex, + size: size, + sse: sse, + streamSha256: true, + } + return c.uploadPart(ctx, p) } // ListObjectParts - List uploaded parts of an incomplete upload.x diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 59f347efffef..e86e142e5520 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -24,8 +24,11 @@ import ( "archive/zip" "bytes" "context" + "crypto/sha1" + "encoding/base64" "errors" "fmt" + "hash" "hash/crc32" "io" "io/ioutil" @@ -46,6 +49,7 @@ import ( "github.com/dustin/go-humanize" jsoniter "github.com/json-iterator/go" + "github.com/minio/sha256-simd" log "github.com/sirupsen/logrus" "github.com/minio/minio-go/v7" @@ -1991,6 +1995,167 @@ func testObjectTaggingWithVersioning() { successLogger(testName, function, args, startTime).Info() } +// Test PutObject with custom checksums. +func testPutObjectWithChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + //c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + header string + hasher hash.Hash + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + }{ + {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + {header: "x-amz-checksum-sha1", hasher: sha1.New()}, + {header: "x-amz-checksum-sha256", hasher: sha256.New()}, + } + + for i, test := range tests { + bufSize := dataFileMap["datafile-129-MB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-129-MB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.hasher + h.Reset() + // Wrong CRC. + meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + args["metadata"] = meta + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + }) + if err == nil { + if i == 0 && resp.ChecksumCRC32 == "" { + ignoredLog(testName, function, args, startTime, "Checksums does not appear to be supported by backend").Info() + return + } + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Set correct CRC. 
+ h.Write(b) + meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + reader.Close() + + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + delete(args, "metadata") + } + + successLogger(testName, function, args, startTime).Info() +} + // Test PutObject using a large data to trigger multipart readat func testPutObjectWithMetadata() { // initialize logging params @@ -4039,10 +4204,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) return } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) return @@ -4051,10 +4212,6 @@ func testPresignedPostPolicy() { logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) return } - if err := policy.SetContentTypeStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err) - return - } if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) return @@ -12128,6 +12285,7 @@ func main() { testComposeObjectErrorCasesV2() testCompose10KSourcesV2() testUserMetadataCopyingV2() + testPutObjectWithChecksums() testPutObject0ByteV2() testPutObjectNoLengthV2() testPutObjectsUnknownV2() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index 
12ed08427707..e964b5217378 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -19,6 +19,7 @@ package credentials import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/xml" "errors" @@ -31,7 +32,6 @@ import ( "time" "github.com/minio/minio-go/v7/pkg/signer" - sha256 "github.com/minio/sha256-simd" ) // AssumeRoleResponse contains the result of successful AssumeRole request. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go index 6dc8e9d05254..ddccfb173fe6 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -31,18 +31,17 @@ package credentials // will cache that Provider for all calls to IsExpired(), until Retrieve is // called again after IsExpired() is true. // -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvAWSS3{}, +// &credentials.EnvMinio{}, +// }) // +// // Usage of ChainCredentials. +// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") +// if err != nil { +// log.Fatalln(err) +// } type Chain struct { Providers []Provider curr Provider diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 6b93a27fbd47..af6104967c76 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -65,10 +65,11 @@ type Provider interface { // provider's struct. // // Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } +// +// type IAMCredentialProvider struct { +// Expiry +// ... 
+// } type Expiry struct { // The date/time when to expire on expiration time.Time diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 000000000000..afbfad559ece --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample index 7fc91d9d2047..e2dc1bfecb1d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -10,3 +10,6 @@ aws_secret_access_key = secret [with_colon] aws_access_key_id: accessKey aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go index 0c94477b752c..fbfb105491db 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -28,35 +28,33 @@ // // Example of using the environment variable credentials. // -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } // // Example of forcing credentials to expire and be refreshed on the next Get(). // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. // -// -// Custom Provider +// # Custom Provider // // Each Provider built into this package also provides a helper method to generate // a Credentials pointer setup with the provider. To use a custom Provider just // create a type which satisfies the Provider interface and pass it to the // NewCredentials method. 
// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} // +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() package credentials diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index cbdcfe25653d..da09707e3395 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -18,17 +18,33 @@ package credentials import ( + "encoding/json" + "errors" "os" + "os/exec" "path/filepath" + "strings" + "time" ini "gopkg.in/ini.v1" ) +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + // A FileAWSCredentials retrieves credentials from the current user's home // directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type FileAWSCredentials struct { + Expiry + // Path to the shared credentials file. // // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the @@ -89,6 +105,33 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { // Default to empty string if not found. token := iniProfile.Key("aws_session_token") + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + SignerType: SignatureV4, + }, nil + } p.retrieved = true return Value{ AccessKeyID: id.String(), @@ -98,11 +141,6 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { }, nil } -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
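The minio-go change above adds `credential_process` support to `FileAWSCredentials`: when a shared-credentials profile defines `credential_process`, `Retrieve` runs that command, parses its JSON output (Version, AccessKeyId, SecretAccessKey, SessionToken, Expiration), and tracks expiry through the newly embedded `Expiry`. A minimal sketch of exercising this from application code, assuming a `with_process` profile like the one added to `credentials.sample`; the file paths and profile name are illustrative assumptions, not part of the vendored change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Assumes the shared credentials file (AWS_SHARED_CREDENTIALS_FILE or
	// ~/.aws/credentials) contains a profile such as:
	//
	//   [with_process]
	//   credential_process = /bin/cat /path/to/credentials.json
	//
	// where credentials.json has the shape of the sample file added in this
	// change (Version, AccessKeyId, SecretAccessKey, SessionToken, Expiration).
	creds := credentials.NewFileAWSCredentials("", "with_process")

	// Get runs the external process when credential_process is configured,
	// unmarshals its JSON output, and returns a signature-v4 Value that is
	// considered valid until the reported Expiration.
	val, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("access key:", val.AccessKeyID)
}
```

Because the configured command is executed as-is, the credentials file itself must be trusted; expiry handling comes for free since the provider calls SetExpiration with DefaultExpiryWindow after each successful run.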
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index f7a4af4a2aec..14369cf10a72 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -289,7 +289,10 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, } // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - token, _ := fetchIMDSToken(client, endpoint) + token, err := fetchIMDSToken(client, endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html u, err := getIAMRoleURL(endpoint) diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go index 1f106ef72c59..34598bd8e731 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. + * Copyright 2019-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" "time" ) @@ -122,12 +123,14 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, if err != nil { return AssumeRoleWithClientGrantsResponse{}, err } - u.RawQuery = v.Encode() - req, err := http.NewRequest(http.MethodPost, u.String(), nil) + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) if err != nil { return AssumeRoleWithClientGrantsResponse{}, err } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, err := clnt.Do(req) if err != nil { return AssumeRoleWithClientGrantsResponse{}, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go index ab588712cf69..e1f9ce4bea87 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -89,12 +89,12 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { - return value, stripPassword(err) + return value, err } resp, err := c.Client.Do(req) if err != nil { - return value, stripPassword(err) + return value, err } defer resp.Body.Close() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index 586995e86f6b..25b45ecb09e3 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2021 MinIO, Inc. + * Copyright 2019-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,6 +24,7 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" "time" ) @@ -105,22 +106,6 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { } } -func stripPassword(err error) error { - urlErr, ok := err.(*url.Error) - if ok { - u, _ := url.Parse(urlErr.URL) - if u == nil { - return urlErr - } - values := u.Query() - values.Set("LDAPPassword", "xxxxx") - u.RawQuery = values.Encode() - urlErr.URL = u.String() - return urlErr - } - return err -} - // NewLDAPIdentityWithSessionPolicy returns new credentials object that uses // LDAP Identity with a specified session policy. The `policy` parameter must be // a JSON string specifying the policy document. @@ -156,16 +141,16 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds()))) } - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPost, u.String(), nil) + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) if err != nil { - return value, stripPassword(err) + return value, err } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, err := k.Client.Do(req) if err != nil { - return value, stripPassword(err) + return value, err } defer resp.Body.Close() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 19bc3ddfc00e..50f5f1ce65f9 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -1,6 +1,6 @@ /* * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. + * Copyright 2019-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "time" ) @@ -139,13 +140,13 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession return AssumeRoleWithWebIdentityResponse{}, err } - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPost, u.String(), nil) + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) if err != nil { return AssumeRoleWithWebIdentityResponse{}, err } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, err := clnt.Do(req) if err != nil { return AssumeRoleWithWebIdentityResponse{}, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 000000000000..6db26c036f56 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 000000000000..640258242f90 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go index 06e68e7367e2..163fa62b427d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -67,9 +67,9 @@ var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { // Type is the server-side-encryption method. It represents one of // the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption type Type string const ( diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index 743d8eca96b7..88a56b09fa12 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -329,15 +329,15 @@ type Expiration struct { XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` - DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"` } // MarshalJSON customizes json encoding by removing empty day/date specification. 
func (e Expiration) MarshalJSON() ([]byte, error) { type expiration struct { - Date *ExpirationDate `json:"Date,omitempty"` - Days *ExpirationDays `json:"Days,omitempty"` - DeleteMarker ExpireDeleteMarker + Date *ExpirationDate `json:"Date,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"` } newexp := expiration{ diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go index d0a47163838d..126661a9e68e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -22,14 +22,14 @@ type identity struct { PrincipalID string `json:"principalId"` } -// event bucket metadata. +// event bucket metadata. type bucketMeta struct { Name string `json:"name"` OwnerIdentity identity `json:"ownerIdentity"` ARN string `json:"arn"` } -// event object metadata. +// event object metadata. type objectMeta struct { Key string `json:"key"` Size int64 `json:"size,omitempty"` @@ -40,7 +40,7 @@ type objectMeta struct { Sequencer string `json:"sequencer"` } -// event server specific metadata. +// event server specific metadata. type eventMeta struct { SchemaVersion string `json:"s3SchemaVersion"` ConfigurationID string `json:"configurationId"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index 75a1f609621e..931ca5bc284a 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -21,6 +21,7 @@ import ( "encoding/xml" "errors" "fmt" + "strings" "github.com/minio/minio-go/v7/pkg/set" ) @@ -29,7 +30,8 @@ import ( type EventType string // The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations const ( ObjectCreatedAll EventType = "s3:ObjectCreated:*" ObjectCreatedPut = "s3:ObjectCreated:Put" @@ -87,6 +89,27 @@ func NewArn(partition, service, region, accountID, resource string) Arn { } } +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:::::'") +) + +// NewArnFromString parses string representation of ARN into Arn object. +// Returns an error if the string format is incorrect. 
+func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + // String returns the string format of the ARN func (arn Arn) String() string { return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 000000000000..5838b9de9958 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,225 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... 
repeat +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides chunked upload +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for streaming signature. + prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. 
+ s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go index b1296d2b1762..49e999b01a32 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -32,13 +32,17 @@ import ( // Reference for constants used below - // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" ) // Request headers to be ignored while calculating seed signature for @@ -60,7 +64,7 @@ func getSignedChunkLength(chunkDataSize int64) int64 { } // getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { if dataLen <= 0 { return 0 } @@ -73,6 +77,15 @@ func getStreamLength(dataLen, chunkSize int64) int64 { streamLen += getSignedChunkLength(remainingBytes) } streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + return streamLen } @@ -91,18 +104,41 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [ return strings.Join(stringToSignParts, "\n") } +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. 
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + // prepareStreamingRequest - prepares a request with appropriate // headers before computing the seed signature. func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) } @@ -122,6 +158,16 @@ func buildChunkSignature(chunkData []byte, reqTime time.Time, region, return getSignature(signingKey, chunkStringToSign) } +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + // getSeedSignature - returns the seed signature for a given request. func (s *StreamingReader) setSeedSignature(req *http.Request) { // Get canonical request @@ -156,10 +202,11 @@ type StreamingReader struct { chunkNum int totalChunks int lastChunkSize int + trailer http.Header } // signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { // Compute chunk signature for next header signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, s.region, s.prevSignature, s.secretAccessKey) @@ -175,13 +222,40 @@ func (s *StreamingReader) signChunk(chunkLen int) { s.buf.Write(s.chunkBuf[:chunkLen]) // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) + if addCrLf { + s.buf.Write([]byte("\r\n")) + } // Reset chunkBufLen for next chunk read. s.chunkBufLen = 0 s.chunkNum++ } +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) 
+ } + + // Compute chunk signature + signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + // setStreamingAuthHeader - builds and sets authorization header value // for streaming signature. func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { @@ -222,6 +296,11 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, lastChunkSize: int(dataLen % payloadChunkSize), } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove... + req.Trailer = nil + } // Add the request headers required for chunk upload signing. @@ -272,7 +351,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { (s.chunkNum == s.totalChunks-1 && s.chunkBufLen == s.lastChunkSize) { // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) + s.signChunk(s.chunkBufLen, true) break } } @@ -289,7 +368,11 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { } // Sign the chunk and write it to s.buf. - s.signChunk(0) + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addSignedTrailer(s.trailer) + } break } return 0, err diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go index cf7921d1f643..fa4f8c91e6cf 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -162,11 +162,12 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost b // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func preStringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -189,11 +190,12 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; func stringToSignV2(req http.Request, virtualHost bool) string { buf := new(bytes.Buffer) // Write standard headers. @@ -281,8 +283,9 @@ var resourceList = []string{ // From the Amazon docs: // // CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { // Save request URL. 
requestURL := req.URL diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go index a12608ebb0b6..34914490c0ed 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -42,7 +42,6 @@ const ( ServiceTypeSTS = "sts" ) -// // Excerpts from @lsegal - // https:/github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. // @@ -57,7 +56,6 @@ const ( // * Accept-Encoding // Some S3 servers like Hitachi Content Platform do not honor this header for signature // calculation. -// var v4IgnoredHeaders = map[string]bool{ "Accept-Encoding": true, "Authorization": true, @@ -177,12 +175,13 @@ func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { // getCanonicalRequest generate a canonical request of style. // // canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// +// +// \n +// \n +// \n +// \n +// \n +// func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") canonicalRequest := strings.Join([]string{ @@ -264,11 +263,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l // SignV4STS - signature v4 for STS request. func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) + return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil) } // Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { +func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -285,6 +284,15 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati req.Header.Set("X-Amz-Security-Token", sessionToken) } + if len(trailer) > 0 { + for k := range trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + + req.TransferEncoding = []string{"aws-chunked"} + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10)) + } + hashedPayload := getHashedPayload(req) if serviceType == ServiceTypeSTS { // Content sha256 header is not sent with the request @@ -322,11 +330,22 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, locati auth := strings.Join(parts, ", ") req.Header.Set("Authorization", auth) + if len(trailer) > 0 { + // Use custom chunked encoding. + req.Trailer = trailer + return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC()) + } return &req } // SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. 
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil) +} + +// SignV4Trailer sign the request before Do(), in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request { + return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer) } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go index b54fa4c77cfd..333f1aa250fc 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go @@ -19,10 +19,9 @@ package signer import ( "crypto/hmac" + "crypto/sha256" "net/http" "strings" - - "github.com/minio/sha256-simd" ) // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go index d7c65af5a0ff..98ae17efad34 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go @@ -1,5 +1,6 @@ /* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020-2022 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +21,8 @@ import ( "encoding/xml" "io" "net/url" + "regexp" + "sort" "strings" "unicode/utf8" ) @@ -63,8 +66,17 @@ const ( maxTagCount = 50 ) +// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions +// borrowed from this article and also testing various ASCII characters following regex +// is supported by AWS S3 for both tags and values. 
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`) + func checkKey(key string) error { - if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { + if len(key) == 0 { + return errInvalidTagKey + } + + if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) { return errInvalidTagKey } @@ -72,8 +84,10 @@ func checkKey(key string) error { } func checkValue(value string) error { - if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") { - return errInvalidTagValue + if value != "" { + if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) { + return errInvalidTagValue + } } return nil @@ -136,11 +150,26 @@ type tagSet struct { } func (tags tagSet) String() string { - vals := make(url.Values) - for key, value := range tags.tagMap { - vals.Set(key, value) + if len(tags.tagMap) == 0 { + return "" } - return vals.Encode() + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() } func (tags *tagSet) remove(key string) { @@ -175,7 +204,7 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error { } func (tags tagSet) toMap() map[string]string { - m := make(map[string]string) + m := make(map[string]string, len(tags.tagMap)) for key, value := range tags.tagMap { m[key] = value } @@ -188,6 +217,7 @@ func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { Tags []Tag `xml:"Tag"` }{} + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) for key, value := range tags.tagMap { tagList.Tags = append(tagList.Tags, Tag{key, value}) } @@ -213,7 +243,7 @@ func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { return errTooManyTags } - m := map[string]string{} + m := make(map[string]string, len(tagList.Tags)) for _, tag := range tagList.Tags { if _, found := m[tag.Key]; found { return errDuplicateTagKey @@ -311,14 +341,49 @@ func ParseObjectXML(reader io.Reader) (*Tags, error) { return unmarshalXML(reader, true) } +// stringsCut slices s around the first instance of sep, +// returning the text before and after sep. +// The found result reports whether sep appears in s. +// If sep does not appear in s, cut returns s, "", false. +func stringsCut(s, sep string) (before, after string, found bool) { + if i := strings.Index(s, sep); i >= 0 { + return s[:i], s[i+len(sep):], true + } + return s, "", false +} + +func (tags *tagSet) parseTags(tgs string) (err error) { + for tgs != "" { + var key string + key, tgs, _ = stringsCut(tgs, "&") + if key == "" { + continue + } + key, value, _ := stringsCut(key, "=") + key, err1 := url.QueryUnescape(key) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + value, err1 = url.QueryUnescape(value) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + if err = tags.set(key, value, true); err != nil { + return err + } + } + return err +} + // Parse decodes HTTP query formatted string into tags which is limited by isObject. // A query formatted string is like "key1=value1&key2=value2". 
func Parse(s string, isObject bool) (*Tags, error) { - values, err := url.ParseQuery(s) - if err != nil { - return nil, err - } - tagging := &Tags{ TagSet: &tagSet{ tagMap: make(map[string]string), @@ -326,10 +391,8 @@ func Parse(s string, isObject bool) (*Tags, error) { }, } - for key := range values { - if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { - return nil, err - } + if err := tagging.TagSet.parseTags(s); err != nil { + return nil, err } return tagging, nil diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 7aa96e0d69fa..4b3df19127fe 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -32,12 +32,11 @@ const expirationDateFormat = "2006-01-02T15:04:05.999Z" // // Example: // -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } type policyCondition struct { matchType string condition string @@ -98,10 +97,8 @@ func (p *PostPolicy) SetKey(key string) error { // SetKeyStartsWith - Sets an object name that an policy based upload // can start with. +// Can use an empty value ("") to allow any key. func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errInvalidArgument("Object prefix is empty.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$key", @@ -172,10 +169,8 @@ func (p *PostPolicy) SetContentType(contentType string) error { // SetContentTypeStartsWith - Sets what content-type of the object for this policy // based upload can start with. +// Can use an empty value ("") to allow any content-type. func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { - return errInvalidArgument("No content type specified.") - } policyCond := policyCondition{ matchType: "starts-with", condition: "$Content-Type", @@ -197,8 +192,8 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error { if min < 0 { return errInvalidArgument("Minimum limit cannot be negative.") } - if max < 0 { - return errInvalidArgument("Maximum limit cannot be negative.") + if max <= 0 { + return errInvalidArgument("Maximum limit cannot be non-positive.") } p.contentLengthRange.min = min p.contentLengthRange.max = max @@ -286,10 +281,14 @@ func (p *PostPolicy) SetUserData(key string, value string) error { } // addNewPolicy - internal helper to validate adding new policies. +// Can use starts-with with an empty value ("") to allow any content within a form field. 
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + if policyCond.matchType == "" || policyCond.condition == "" { return errInvalidArgument("Policy fields are empty.") } + if policyCond.matchType != "starts-with" && policyCond.value == "" { + return errInvalidArgument("Policy value is empty.") + } p.conditions = append(p.conditions, policyCond) return nil } diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index f454e675cf5b..055c14c4dec7 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -19,7 +19,10 @@ package minio import ( "context" + "crypto/x509" + "errors" "net/http" + "net/url" "time" ) @@ -123,3 +126,23 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { _, ok = retryableHTTPStatusCodes[httpStatusCode] return ok } + +// For now, all http Do() requests are retriable except some well defined errors +func isRequestErrorRetryable(err error) bool { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return false + } + if ue, ok := err.(*url.Error); ok { + e := ue.Unwrap() + switch e.(type) { + // x509: certificate signed by unknown authority + case x509.UnknownAuthorityError: + return false + } + switch e.Error() { + case "http: server gave HTTP response to HTTPS client": + return false + } + } + return true +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 3a4cacfe811b..589c0e5498f5 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -28,8 +28,10 @@ var awsS3EndpointMap = map[string]string{ "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-central-2": "s3.dualstack.eu-central-2.amazonaws.com", "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", + "eu-south-2": "s3.dualstack.eu-south-2.amazonaws.com", "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", @@ -38,6 +40,7 @@ var awsS3EndpointMap = map[string]string{ "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", + "me-central-1": "s3.dualstack.me-central-1.amazonaws.com", "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 11a4f340327c..a8a45b1a8ce1 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -20,6 +20,7 @@ package minio import ( "context" "crypto/md5" + fipssha256 "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/xml" @@ -39,6 +40,7 @@ import ( "time" md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/sha256-simd" ) @@ -376,6 +378,12 @@ func ToObjectInfo(bucketName string, objectName string, h http.Header) 
(ObjectIn UserTags: userTags, UserTagCount: tagCount, Restore: restore, + + // Checksum values + ChecksumCRC32: h.Get("x-amz-checksum-crc32"), + ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), + ChecksumSHA1: h.Get("x-amz-checksum-sha1"), + ChecksumSHA256: h.Get("x-amz-checksum-sha256"), }, nil } @@ -501,7 +509,7 @@ func isSSEHeader(headerKey string) bool { func isAmzHeader(headerKey string) bool { key := strings.ToLower(headerKey) - return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } var ( @@ -514,6 +522,9 @@ func newMd5Hasher() md5simd.Hasher { } func newSHA256Hasher() md5simd.Hasher { + if encrypt.FIPS { + return &hashWrapper{Hash: fipssha256.New(), isSHA256: true} + } return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true} } @@ -621,3 +632,38 @@ func IsNetworkOrHostDown(err error, expectTimeouts bool) bool { } return false } + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. +func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} diff --git a/vendor/github.com/oschwald/geoip2-golang/.gitignore b/vendor/github.com/oschwald/geoip2-golang/.gitignore new file mode 100644 index 000000000000..dca06949cad1 --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/.gitignore @@ -0,0 +1,3 @@ +.vscode +*.out +*.test diff --git a/vendor/github.com/oschwald/geoip2-golang/.gitmodules b/vendor/github.com/oschwald/geoip2-golang/.gitmodules new file mode 100644 index 000000000000..400b2ab62c0f --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/.gitmodules @@ -0,0 +1,3 @@ +[submodule "test-data"] + path = test-data + url = https://github.com/maxmind/MaxMind-DB.git diff --git a/vendor/github.com/oschwald/geoip2-golang/.golangci.toml b/vendor/github.com/oschwald/geoip2-golang/.golangci.toml new file mode 100644 index 000000000000..b4f7e6a1473e --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/.golangci.toml @@ -0,0 +1,472 @@ +[run] + deadline = "10m" + + tests = true + +[linters] + disable-all = true + enable = [ + "asciicheck", + "bidichk", + "bodyclose", + "containedctx", + "contextcheck", + "deadcode", + "depguard", + "durationcheck", + "errcheck", + "errchkjson", + "errname", + "errorlint", + "exportloopref", + "forbidigo", + #"forcetypeassert", + "goconst", + "gocyclo", + "gocritic", + "godot", + "gofumpt", + "gomodguard", + "gosec", + "gosimple", + "govet", + "grouper", + "ineffassign", + "lll", + "makezero", + "maintidx", + "misspell", + "nakedret", + "nilerr", + "noctx", + "nolintlint", + "nosprintfhostport", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + "tenv", + "tparallel", + "typecheck", + 
"unconvert", + "unparam", + "unused", + "varcheck", + "vetshadow", + "wastedassign", + ] + +# Please note that we only use depguard for stdlib as gomodguard only +# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12 +[linters-settings.depguard] + list-type = "blacklist" + include-go-root = true + packages = [ + # ioutil is deprecated. The functions have been moved elsewhere: + # https://golang.org/doc/go1.16#ioutil + "io/ioutil", + ] + +[linters-settings.errcheck] + # Don't allow setting of error to the blank identifier. If there is a legtimate + # reason, there should be a nolint with an explanation. + check-blank = true + + exclude-functions = [ + # If we are rolling back a transaction, we are often already in an error + # state. + '(*database/sql.Tx).Rollback', + + # It is reasonable to ignore errors if Cleanup fails in most cases. + '(*github.com/google/renameio/v2.PendingFile).Cleanup', + + # We often don't care if removing a file failed (e.g., it doesn't exist) + 'os.Remove', + 'os.RemoveAll', + ] + + # Ignoring Close so that we don't have to have a bunch of + # `defer func() { _ = r.Close() }()` constructs when we + # don't actually care about the error. + ignore = "Close,fmt:.*" + +[linters-settings.errorlint] + errorf = true + asserts = true + comparison = true + +[linters-settings.exhaustive] + default-signifies-exhaustive = true + +[linters-settings.forbidigo] + # Forbid the following identifiers + forbid = [ + "^minFraud*", + "^maxMind*", + ] + +[linters-settings.gocritic] + enabled-checks = [ + "appendAssign", + "appendCombine", + "argOrder", + "assignOp", + "badCall", + "badCond", + "badLock", + "badRegexp", + "badSorting", + "boolExprSimplify", + "builtinShadow", + "builtinShadowDecl", + "captLocal", + "caseOrder", + "codegenComment", + "commentedOutCode", + "commentedOutImport", + "commentFormatting", + "defaultCaseOrder", + # Revive's defer rule already captures this. This caught no extra cases. + # "deferInLoop", + "deferUnlambda", + "deprecatedComment", + "docStub", + "dupArg", + "dupBranchBody", + "dupCase", + "dupImport", + "dupSubExpr", + "dynamicFmtString", + "elseif", + "emptyDecl", + "emptyFallthrough", + "emptyStringTest", + "equalFold", + "evalOrder", + "exitAfterDefer", + "exposedSyncMutex", + "externalErrorReassign", + # Given that all of our code runs on Linux and the / separate should + # work fine, this seems less important. + # "filepathJoin", + "flagDeref", + "flagName", + "hexLiteral", + "ifElseChain", + "importShadow", + "indexAlloc", + "initClause", + "ioutilDeprecated", + "mapKey", + "methodExprCall", + "nestingReduce", + "newDeref", + "nilValReturn", + "octalLiteral", + "offBy1", + "paramTypeCombine", + "preferDecodeRune", + "preferFilepathJoin", + "preferFprint", + "preferStringWriter", + "preferWriteByte", + "ptrToRefParam", + "rangeExprCopy", + "rangeValCopy", + "redundantSprint", + "regexpMust", + "regexpPattern", + # This might be good, but I don't think we want to encourage + # significant changes to regexes as we port stuff from Perl. + # "regexpSimplify", + "ruleguard", + "singleCaseSwitch", + "sliceClear", + "sloppyLen", + # This seems like it might also be good, but a lot of existing code + # fails. 
+ # "sloppyReassign", + "returnAfterHttpError", + "sloppyTypeAssert", + "sortSlice", + "sprintfQuotedString", + "sqlQuery", + "stringsCompare", + "stringXbytes", + "switchTrue", + "syncMapLoadAndDelete", + "timeExprSimplify", + "todoCommentWithoutDetail", + "tooManyResultsChecker", + "truncateCmp", + "typeAssertChain", + "typeDefFirst", + "typeSwitchVar", + "typeUnparen", + "underef", + "unlabelStmt", + "unlambda", + # I am not sure we would want this linter and a lot of existing + # code fails. + # "unnamedResult", + "unnecessaryBlock", + "unnecessaryDefer", + "unslice", + "valSwap", + "weakCond", + "wrapperFunc", + "yodaStyleExpr", + # This requires explanations for "nolint" directives. This would be + # nice for gosec ones, but I am not sure we want it generally unless + # we can get the false positive rate lower. + # "whyNoLint" + ] + +[linters-settings.gofumpt] + extra-rules = true + lang-version = "1.18" + +[linters-settings.govet] + "enable-all" = true + +[linters-settings.lll] + line-length = 120 + tab-width = 4 + +[linters-settings.nolintlint] + allow-leading-space = false + allow-unused = false + allow-no-explanation = ["lll", "misspell"] + require-explanation = true + require-specific = true + +[linters-settings.revive] + ignore-generated-header = true + severity = "warning" + + # This might be nice but it is so common that it is hard + # to enable. + # [[linters-settings.revive.rules]] + # name = "add-constant" + + # [[linters-settings.revive.rules]] + # name = "argument-limit" + + [[linters-settings.revive.rules]] + name = "atomic" + + [[linters-settings.revive.rules]] + name = "bare-return" + + [[linters-settings.revive.rules]] + name = "blank-imports" + + [[linters-settings.revive.rules]] + name = "bool-literal-in-expr" + + [[linters-settings.revive.rules]] + name = "call-to-gc" + + # [[linters-settings.revive.rules]] + # name = "cognitive-complexity" + + # Probably a good rule, but we have a lot of names that + # only have case differences. + # [[linters-settings.revive.rules]] + # name = "confusing-naming" + + # [[linters-settings.revive.rules]] + # name = "confusing-results" + + [[linters-settings.revive.rules]] + name = "constant-logical-expr" + + [[linters-settings.revive.rules]] + name = "context-as-argument" + + [[linters-settings.revive.rules]] + name = "context-keys-type" + + # [[linters-settings.revive.rules]] + # name = "cyclomatic" + + # [[linters-settings.revive.rules]] + # name = "deep-exit" + + [[linters-settings.revive.rules]] + name = "defer" + + [[linters-settings.revive.rules]] + name = "dot-imports" + + [[linters-settings.revive.rules]] + name = "duplicated-imports" + + [[linters-settings.revive.rules]] + name = "early-return" + + [[linters-settings.revive.rules]] + name = "empty-block" + + [[linters-settings.revive.rules]] + name = "empty-lines" + + [[linters-settings.revive.rules]] + name = "errorf" + + [[linters-settings.revive.rules]] + name = "error-naming" + + [[linters-settings.revive.rules]] + name = "error-return" + + [[linters-settings.revive.rules]] + name = "error-strings" + + [[linters-settings.revive.rules]] + name = "exported" + + # [[linters-settings.revive.rules]] + # name = "file-header" + + # We have a lot of flag parameters. This linter probably makes + # a good point, but we would need some cleanup or a lot of nolints. 
+ # [[linters-settings.revive.rules]] + # name = "flag-parameter" + + # [[linters-settings.revive.rules]] + # name = "function-result-limit" + + [[linters-settings.revive.rules]] + name = "get-return" + + [[linters-settings.revive.rules]] + name = "identical-branches" + + [[linters-settings.revive.rules]] + name = "if-return" + + [[linters-settings.revive.rules]] + name = "imports-blacklist" + + [[linters-settings.revive.rules]] + name = "import-shadowing" + + [[linters-settings.revive.rules]] + name = "increment-decrement" + + [[linters-settings.revive.rules]] + name = "indent-error-flow" + + # [[linters-settings.revive.rules]] + # name = "line-length-limit" + + # [[linters-settings.revive.rules]] + # name = "max-public-structs" + + [[linters-settings.revive.rules]] + name = "modifies-parameter" + + [[linters-settings.revive.rules]] + name = "modifies-value-receiver" + + # We frequently use nested structs, particularly in tests. + # [[linters-settings.revive.rules]] + # name = "nested-structs" + + [[linters-settings.revive.rules]] + name = "optimize-operands-order" + + [[linters-settings.revive.rules]] + name = "package-comments" + + [[linters-settings.revive.rules]] + name = "range" + + [[linters-settings.revive.rules]] + name = "range-val-address" + + [[linters-settings.revive.rules]] + name = "range-val-in-closure" + + [[linters-settings.revive.rules]] + name = "receiver-naming" + + [[linters-settings.revive.rules]] + name = "redefines-builtin-id" + + [[linters-settings.revive.rules]] + name = "string-of-int" + + [[linters-settings.revive.rules]] + name = "struct-tag" + + [[linters-settings.revive.rules]] + name = "superfluous-else" + + [[linters-settings.revive.rules]] + name = "time-naming" + + [[linters-settings.revive.rules]] + name = "unconditional-recursion" + + [[linters-settings.revive.rules]] + name = "unexported-naming" + + [[linters-settings.revive.rules]] + name = "unexported-return" + + # This is covered elsewhere and we want to ignore some + # functions such as fmt.Fprintf. + # [[linters-settings.revive.rules]] + # name = "unhandled-error" + + [[linters-settings.revive.rules]] + name = "unnecessary-stmt" + + [[linters-settings.revive.rules]] + name = "unreachable-code" + + [[linters-settings.revive.rules]] + name = "unused-parameter" + + # We generally have unused receivers in tests for meeting the + # requirements of an interface. + # [[linters-settings.revive.rules]] + # name = "unused-receiver" + + # This probably makes sense after we upgrade to 1.18 + # [[linters-settings.revive.rules]] + # name = "use-any" + + [[linters-settings.revive.rules]] + name = "useless-break" + + [[linters-settings.revive.rules]] + name = "var-declaration" + + [[linters-settings.revive.rules]] + name = "var-naming" + + [[linters-settings.revive.rules]] + name = "waitgroup-by-value" + +[linters-settings.unparam] + check-exported = true + +[[issues.exclude-rules]] + linters = [ + "govet" + ] + # we want to enable almost all govet rules. It is easier to just filter out + # the ones we don't want: + # + # * fieldalignment - way too noisy. Although it is very useful in particular + # cases where we are trying to use as little memory as possible, having + # it go off on every struct isn't helpful. + # * shadow - although often useful, it complains about _many_ err + # shadowing assignments and some others where shadowing is clear. 
+ text = "^(fieldalignment|shadow)" diff --git a/vendor/github.com/oschwald/geoip2-golang/LICENSE b/vendor/github.com/oschwald/geoip2-golang/LICENSE new file mode 100644 index 000000000000..2969677f1590 --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2015, Gregory J. Oschwald + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/oschwald/geoip2-golang/README.md b/vendor/github.com/oschwald/geoip2-golang/README.md new file mode 100644 index 000000000000..72378f066d0b --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/README.md @@ -0,0 +1,93 @@ +# GeoIP2 Reader for Go # + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/oschwald/geoip2-golang)](https://pkg.go.dev/github.com/oschwald/geoip2-golang) + +This library reads MaxMind [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) +and [GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases. + +This library is built using +[the Go maxminddb reader](https://github.com/oschwald/maxminddb-golang). +All data for the database record is decoded using this library. If you only +need several fields, you may get superior performance by using maxminddb's +`Lookup` directly with a result struct that only contains the required fields. +(See [example_test.go](https://github.com/oschwald/maxminddb-golang/blob/main/example_test.go) +in the maxminddb repository for an example of this.) + +## Installation ## + +``` +go get github.com/oschwald/geoip2-golang +``` + +## Usage ## + +[See GoDoc](http://godoc.org/github.com/oschwald/geoip2-golang) for +documentation and examples. 
+ +## Example ## + +```go +package main + +import ( + "fmt" + "log" + "net" + + "github.com/oschwald/geoip2-golang" +) + +func main() { + db, err := geoip2.Open("GeoIP2-City.mmdb") + if err != nil { + log.Fatal(err) + } + defer db.Close() + // If you are using strings that may be invalid, check that ip is not nil + ip := net.ParseIP("81.2.69.142") + record, err := db.City(ip) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Portuguese (BR) city name: %v\n", record.City.Names["pt-BR"]) + if len(record.Subdivisions) > 0 { + fmt.Printf("English subdivision name: %v\n", record.Subdivisions[0].Names["en"]) + } + fmt.Printf("Russian country name: %v\n", record.Country.Names["ru"]) + fmt.Printf("ISO country code: %v\n", record.Country.IsoCode) + fmt.Printf("Time zone: %v\n", record.Location.TimeZone) + fmt.Printf("Coordinates: %v, %v\n", record.Location.Latitude, record.Location.Longitude) + // Output: + // Portuguese (BR) city name: Londres + // English subdivision name: England + // Russian country name: Великобритания + // ISO country code: GB + // Time zone: Europe/London + // Coordinates: 51.5142, -0.0931 +} + +``` + +## Testing ## + +Make sure you checked out test data submodule: + +``` +git submodule init +git submodule update +``` + +Execute test suite: + +``` +go test +``` + +## Contributing ## + +Contributions welcome! Please fork the repository and open a pull request +with your changes. + +## License ## + +This is free software, licensed under the ISC license. diff --git a/vendor/github.com/oschwald/geoip2-golang/reader.go b/vendor/github.com/oschwald/geoip2-golang/reader.go new file mode 100644 index 000000000000..f8439ecd8f9c --- /dev/null +++ b/vendor/github.com/oschwald/geoip2-golang/reader.go @@ -0,0 +1,418 @@ +// Package geoip2 provides an easy-to-use API for the MaxMind GeoIP2 and +// GeoLite2 databases; this package does not support GeoIP Legacy databases. +// +// The structs provided by this package match the internal structure of +// the data in the MaxMind databases. +// +// See github.com/oschwald/maxminddb-golang for more advanced used cases. +package geoip2 + +import ( + "fmt" + "net" + + "github.com/oschwald/maxminddb-golang" +) + +// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise +// database. 
+type Enterprise struct { + City struct { + Confidence uint8 `maxminddb:"confidence"` + GeoNameID uint `maxminddb:"geoname_id"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"city"` + Continent struct { + Code string `maxminddb:"code"` + GeoNameID uint `maxminddb:"geoname_id"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"continent"` + Country struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + Confidence uint8 `maxminddb:"confidence"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + } `maxminddb:"country"` + Location struct { + AccuracyRadius uint16 `maxminddb:"accuracy_radius"` + Latitude float64 `maxminddb:"latitude"` + Longitude float64 `maxminddb:"longitude"` + MetroCode uint `maxminddb:"metro_code"` + TimeZone string `maxminddb:"time_zone"` + } `maxminddb:"location"` + Postal struct { + Code string `maxminddb:"code"` + Confidence uint8 `maxminddb:"confidence"` + } `maxminddb:"postal"` + RegisteredCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + Confidence uint8 `maxminddb:"confidence"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + } `maxminddb:"registered_country"` + RepresentedCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + Type string `maxminddb:"type"` + } `maxminddb:"represented_country"` + Subdivisions []struct { + Confidence uint8 `maxminddb:"confidence"` + GeoNameID uint `maxminddb:"geoname_id"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"subdivisions"` + Traits struct { + AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"` + AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"` + ConnectionType string `maxminddb:"connection_type"` + Domain string `maxminddb:"domain"` + IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"` + IsLegitimateProxy bool `maxminddb:"is_legitimate_proxy"` + IsSatelliteProvider bool `maxminddb:"is_satellite_provider"` + ISP string `maxminddb:"isp"` + MobileCountryCode string `maxminddb:"mobile_country_code"` + MobileNetworkCode string `maxminddb:"mobile_network_code"` + Organization string `maxminddb:"organization"` + StaticIPScore float64 `maxminddb:"static_ip_score"` + UserType string `maxminddb:"user_type"` + } `maxminddb:"traits"` +} + +// The City struct corresponds to the data in the GeoIP2/GeoLite2 City +// databases. 
+type City struct { + City struct { + GeoNameID uint `maxminddb:"geoname_id"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"city"` + Continent struct { + Code string `maxminddb:"code"` + GeoNameID uint `maxminddb:"geoname_id"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"continent"` + Country struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"country"` + Location struct { + AccuracyRadius uint16 `maxminddb:"accuracy_radius"` + Latitude float64 `maxminddb:"latitude"` + Longitude float64 `maxminddb:"longitude"` + MetroCode uint `maxminddb:"metro_code"` + TimeZone string `maxminddb:"time_zone"` + } `maxminddb:"location"` + Postal struct { + Code string `maxminddb:"code"` + } `maxminddb:"postal"` + RegisteredCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"registered_country"` + RepresentedCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + Type string `maxminddb:"type"` + } `maxminddb:"represented_country"` + Subdivisions []struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"subdivisions"` + Traits struct { + IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"` + IsSatelliteProvider bool `maxminddb:"is_satellite_provider"` + } `maxminddb:"traits"` +} + +// The Country struct corresponds to the data in the GeoIP2/GeoLite2 +// Country databases. +type Country struct { + Continent struct { + Code string `maxminddb:"code"` + GeoNameID uint `maxminddb:"geoname_id"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"continent"` + Country struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"country"` + RegisteredCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + } `maxminddb:"registered_country"` + RepresentedCountry struct { + GeoNameID uint `maxminddb:"geoname_id"` + IsInEuropeanUnion bool `maxminddb:"is_in_european_union"` + IsoCode string `maxminddb:"iso_code"` + Names map[string]string `maxminddb:"names"` + Type string `maxminddb:"type"` + } `maxminddb:"represented_country"` + Traits struct { + IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"` + IsSatelliteProvider bool `maxminddb:"is_satellite_provider"` + } `maxminddb:"traits"` +} + +// The AnonymousIP struct corresponds to the data in the GeoIP2 +// Anonymous IP database. +type AnonymousIP struct { + IsAnonymous bool `maxminddb:"is_anonymous"` + IsAnonymousVPN bool `maxminddb:"is_anonymous_vpn"` + IsHostingProvider bool `maxminddb:"is_hosting_provider"` + IsPublicProxy bool `maxminddb:"is_public_proxy"` + IsResidentialProxy bool `maxminddb:"is_residential_proxy"` + IsTorExitNode bool `maxminddb:"is_tor_exit_node"` +} + +// The ASN struct corresponds to the data in the GeoLite2 ASN database. 
+type ASN struct { + AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"` + AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"` +} + +// The ConnectionType struct corresponds to the data in the GeoIP2 +// Connection-Type database. +type ConnectionType struct { + ConnectionType string `maxminddb:"connection_type"` +} + +// The Domain struct corresponds to the data in the GeoIP2 Domain database. +type Domain struct { + Domain string `maxminddb:"domain"` +} + +// The ISP struct corresponds to the data in the GeoIP2 ISP database. +type ISP struct { + AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"` + AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"` + ISP string `maxminddb:"isp"` + MobileCountryCode string `maxminddb:"mobile_country_code"` + MobileNetworkCode string `maxminddb:"mobile_network_code"` + Organization string `maxminddb:"organization"` +} + +type databaseType int + +const ( + isAnonymousIP = 1 << iota + isASN + isCity + isConnectionType + isCountry + isDomain + isEnterprise + isISP +) + +// Reader holds the maxminddb.Reader struct. It can be created using the +// Open and FromBytes functions. +type Reader struct { + mmdbReader *maxminddb.Reader + databaseType databaseType +} + +// InvalidMethodError is returned when a lookup method is called on a +// database that it does not support. For instance, calling the ISP method +// on a City database. +type InvalidMethodError struct { + Method string + DatabaseType string +} + +func (e InvalidMethodError) Error() string { + return fmt.Sprintf(`geoip2: the %s method does not support the %s database`, + e.Method, e.DatabaseType) +} + +// UnknownDatabaseTypeError is returned when an unknown database type is +// opened. +type UnknownDatabaseTypeError struct { + DatabaseType string +} + +func (e UnknownDatabaseTypeError) Error() string { + return fmt.Sprintf(`geoip2: reader does not support the %q database type`, + e.DatabaseType) +} + +// Open takes a string path to a file and returns a Reader struct or an error. +// The database file is opened using a memory map. Use the Close method on the +// Reader object to return the resources to the system. +func Open(file string) (*Reader, error) { + reader, err := maxminddb.Open(file) + if err != nil { + return nil, err + } + dbType, err := getDBType(reader) + return &Reader{reader, dbType}, err +} + +// FromBytes takes a byte slice corresponding to a GeoIP2/GeoLite2 database +// file and returns a Reader struct or an error. Note that the byte slice is +// used directly; any modification of it after opening the database will result +// in errors while reading from the database. 
+func FromBytes(bytes []byte) (*Reader, error) { + reader, err := maxminddb.FromBytes(bytes) + if err != nil { + return nil, err + } + dbType, err := getDBType(reader) + return &Reader{reader, dbType}, err +} + +func getDBType(reader *maxminddb.Reader) (databaseType, error) { + switch reader.Metadata.DatabaseType { + case "GeoIP2-Anonymous-IP": + return isAnonymousIP, nil + case "DBIP-ASN-Lite (compat=GeoLite2-ASN)", + "GeoLite2-ASN": + return isASN, nil + // We allow City lookups on Country for back compat + case "DBIP-City-Lite", + "DBIP-Country-Lite", + "DBIP-Country", + "DBIP-Location (compat=City)", + "GeoLite2-City", + "GeoIP2-City", + "GeoIP2-City-Africa", + "GeoIP2-City-Asia-Pacific", + "GeoIP2-City-Europe", + "GeoIP2-City-North-America", + "GeoIP2-City-South-America", + "GeoIP2-Precision-City", + "GeoLite2-Country", + "GeoIP2-Country": + return isCity | isCountry, nil + case "GeoIP2-Connection-Type": + return isConnectionType, nil + case "GeoIP2-Domain": + return isDomain, nil + case "DBIP-ISP (compat=Enterprise)", + "DBIP-Location-ISP (compat=Enterprise)", + "GeoIP2-Enterprise": + return isEnterprise | isCity | isCountry, nil + case "GeoIP2-ISP", + "GeoIP2-Precision-ISP": + return isISP | isASN, nil + default: + return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType} + } +} + +// Enterprise takes an IP address as a net.IP struct and returns an Enterprise +// struct and/or an error. This is intended to be used with the GeoIP2 +// Enterprise database. +func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) { + if isEnterprise&r.databaseType == 0 { + return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType} + } + var enterprise Enterprise + err := r.mmdbReader.Lookup(ipAddress, &enterprise) + return &enterprise, err +} + +// City takes an IP address as a net.IP struct and returns a City struct +// and/or an error. Although this can be used with other databases, this +// method generally should be used with the GeoIP2 or GeoLite2 City databases. +func (r *Reader) City(ipAddress net.IP) (*City, error) { + if isCity&r.databaseType == 0 { + return nil, InvalidMethodError{"City", r.Metadata().DatabaseType} + } + var city City + err := r.mmdbReader.Lookup(ipAddress, &city) + return &city, err +} + +// Country takes an IP address as a net.IP struct and returns a Country struct +// and/or an error. Although this can be used with other databases, this +// method generally should be used with the GeoIP2 or GeoLite2 Country +// databases. +func (r *Reader) Country(ipAddress net.IP) (*Country, error) { + if isCountry&r.databaseType == 0 { + return nil, InvalidMethodError{"Country", r.Metadata().DatabaseType} + } + var country Country + err := r.mmdbReader.Lookup(ipAddress, &country) + return &country, err +} + +// AnonymousIP takes an IP address as a net.IP struct and returns a +// AnonymousIP struct and/or an error. +func (r *Reader) AnonymousIP(ipAddress net.IP) (*AnonymousIP, error) { + if isAnonymousIP&r.databaseType == 0 { + return nil, InvalidMethodError{"AnonymousIP", r.Metadata().DatabaseType} + } + var anonIP AnonymousIP + err := r.mmdbReader.Lookup(ipAddress, &anonIP) + return &anonIP, err +} + +// ASN takes an IP address as a net.IP struct and returns a ASN struct and/or +// an error. 
+func (r *Reader) ASN(ipAddress net.IP) (*ASN, error) { + if isASN&r.databaseType == 0 { + return nil, InvalidMethodError{"ASN", r.Metadata().DatabaseType} + } + var val ASN + err := r.mmdbReader.Lookup(ipAddress, &val) + return &val, err +} + +// ConnectionType takes an IP address as a net.IP struct and returns a +// ConnectionType struct and/or an error. +func (r *Reader) ConnectionType(ipAddress net.IP) (*ConnectionType, error) { + if isConnectionType&r.databaseType == 0 { + return nil, InvalidMethodError{"ConnectionType", r.Metadata().DatabaseType} + } + var val ConnectionType + err := r.mmdbReader.Lookup(ipAddress, &val) + return &val, err +} + +// Domain takes an IP address as a net.IP struct and returns a +// Domain struct and/or an error. +func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) { + if isDomain&r.databaseType == 0 { + return nil, InvalidMethodError{"Domain", r.Metadata().DatabaseType} + } + var val Domain + err := r.mmdbReader.Lookup(ipAddress, &val) + return &val, err +} + +// ISP takes an IP address as a net.IP struct and returns a ISP struct and/or +// an error. +func (r *Reader) ISP(ipAddress net.IP) (*ISP, error) { + if isISP&r.databaseType == 0 { + return nil, InvalidMethodError{"ISP", r.Metadata().DatabaseType} + } + var val ISP + err := r.mmdbReader.Lookup(ipAddress, &val) + return &val, err +} + +// Metadata takes no arguments and returns a struct containing metadata about +// the MaxMind database in use by the Reader. +func (r *Reader) Metadata() maxminddb.Metadata { + return r.mmdbReader.Metadata +} + +// Close unmaps the database file from virtual memory and returns the +// resources to the system. +func (r *Reader) Close() error { + return r.mmdbReader.Close() +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitignore b/vendor/github.com/oschwald/maxminddb-golang/.gitignore new file mode 100644 index 000000000000..fe3fa4ab9b5c --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/.gitignore @@ -0,0 +1,4 @@ +.vscode +*.out +*.sw? 
+*.test diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitmodules b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules new file mode 100644 index 000000000000..400b2ab62c0f --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules @@ -0,0 +1,3 @@ +[submodule "test-data"] + path = test-data + url = https://github.com/maxmind/MaxMind-DB.git diff --git a/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml new file mode 100644 index 000000000000..b4f7e6a1473e --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml @@ -0,0 +1,472 @@ +[run] + deadline = "10m" + + tests = true + +[linters] + disable-all = true + enable = [ + "asciicheck", + "bidichk", + "bodyclose", + "containedctx", + "contextcheck", + "deadcode", + "depguard", + "durationcheck", + "errcheck", + "errchkjson", + "errname", + "errorlint", + "exportloopref", + "forbidigo", + #"forcetypeassert", + "goconst", + "gocyclo", + "gocritic", + "godot", + "gofumpt", + "gomodguard", + "gosec", + "gosimple", + "govet", + "grouper", + "ineffassign", + "lll", + "makezero", + "maintidx", + "misspell", + "nakedret", + "nilerr", + "noctx", + "nolintlint", + "nosprintfhostport", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + "tenv", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "vetshadow", + "wastedassign", + ] + +# Please note that we only use depguard for stdlib as gomodguard only +# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12 +[linters-settings.depguard] + list-type = "blacklist" + include-go-root = true + packages = [ + # ioutil is deprecated. The functions have been moved elsewhere: + # https://golang.org/doc/go1.16#ioutil + "io/ioutil", + ] + +[linters-settings.errcheck] + # Don't allow setting of error to the blank identifier. If there is a legtimate + # reason, there should be a nolint with an explanation. + check-blank = true + + exclude-functions = [ + # If we are rolling back a transaction, we are often already in an error + # state. + '(*database/sql.Tx).Rollback', + + # It is reasonable to ignore errors if Cleanup fails in most cases. + '(*github.com/google/renameio/v2.PendingFile).Cleanup', + + # We often don't care if removing a file failed (e.g., it doesn't exist) + 'os.Remove', + 'os.RemoveAll', + ] + + # Ignoring Close so that we don't have to have a bunch of + # `defer func() { _ = r.Close() }()` constructs when we + # don't actually care about the error. + ignore = "Close,fmt:.*" + +[linters-settings.errorlint] + errorf = true + asserts = true + comparison = true + +[linters-settings.exhaustive] + default-signifies-exhaustive = true + +[linters-settings.forbidigo] + # Forbid the following identifiers + forbid = [ + "^minFraud*", + "^maxMind*", + ] + +[linters-settings.gocritic] + enabled-checks = [ + "appendAssign", + "appendCombine", + "argOrder", + "assignOp", + "badCall", + "badCond", + "badLock", + "badRegexp", + "badSorting", + "boolExprSimplify", + "builtinShadow", + "builtinShadowDecl", + "captLocal", + "caseOrder", + "codegenComment", + "commentedOutCode", + "commentedOutImport", + "commentFormatting", + "defaultCaseOrder", + # Revive's defer rule already captures this. This caught no extra cases. 
+ # "deferInLoop", + "deferUnlambda", + "deprecatedComment", + "docStub", + "dupArg", + "dupBranchBody", + "dupCase", + "dupImport", + "dupSubExpr", + "dynamicFmtString", + "elseif", + "emptyDecl", + "emptyFallthrough", + "emptyStringTest", + "equalFold", + "evalOrder", + "exitAfterDefer", + "exposedSyncMutex", + "externalErrorReassign", + # Given that all of our code runs on Linux and the / separate should + # work fine, this seems less important. + # "filepathJoin", + "flagDeref", + "flagName", + "hexLiteral", + "ifElseChain", + "importShadow", + "indexAlloc", + "initClause", + "ioutilDeprecated", + "mapKey", + "methodExprCall", + "nestingReduce", + "newDeref", + "nilValReturn", + "octalLiteral", + "offBy1", + "paramTypeCombine", + "preferDecodeRune", + "preferFilepathJoin", + "preferFprint", + "preferStringWriter", + "preferWriteByte", + "ptrToRefParam", + "rangeExprCopy", + "rangeValCopy", + "redundantSprint", + "regexpMust", + "regexpPattern", + # This might be good, but I don't think we want to encourage + # significant changes to regexes as we port stuff from Perl. + # "regexpSimplify", + "ruleguard", + "singleCaseSwitch", + "sliceClear", + "sloppyLen", + # This seems like it might also be good, but a lot of existing code + # fails. + # "sloppyReassign", + "returnAfterHttpError", + "sloppyTypeAssert", + "sortSlice", + "sprintfQuotedString", + "sqlQuery", + "stringsCompare", + "stringXbytes", + "switchTrue", + "syncMapLoadAndDelete", + "timeExprSimplify", + "todoCommentWithoutDetail", + "tooManyResultsChecker", + "truncateCmp", + "typeAssertChain", + "typeDefFirst", + "typeSwitchVar", + "typeUnparen", + "underef", + "unlabelStmt", + "unlambda", + # I am not sure we would want this linter and a lot of existing + # code fails. + # "unnamedResult", + "unnecessaryBlock", + "unnecessaryDefer", + "unslice", + "valSwap", + "weakCond", + "wrapperFunc", + "yodaStyleExpr", + # This requires explanations for "nolint" directives. This would be + # nice for gosec ones, but I am not sure we want it generally unless + # we can get the false positive rate lower. + # "whyNoLint" + ] + +[linters-settings.gofumpt] + extra-rules = true + lang-version = "1.18" + +[linters-settings.govet] + "enable-all" = true + +[linters-settings.lll] + line-length = 120 + tab-width = 4 + +[linters-settings.nolintlint] + allow-leading-space = false + allow-unused = false + allow-no-explanation = ["lll", "misspell"] + require-explanation = true + require-specific = true + +[linters-settings.revive] + ignore-generated-header = true + severity = "warning" + + # This might be nice but it is so common that it is hard + # to enable. + # [[linters-settings.revive.rules]] + # name = "add-constant" + + # [[linters-settings.revive.rules]] + # name = "argument-limit" + + [[linters-settings.revive.rules]] + name = "atomic" + + [[linters-settings.revive.rules]] + name = "bare-return" + + [[linters-settings.revive.rules]] + name = "blank-imports" + + [[linters-settings.revive.rules]] + name = "bool-literal-in-expr" + + [[linters-settings.revive.rules]] + name = "call-to-gc" + + # [[linters-settings.revive.rules]] + # name = "cognitive-complexity" + + # Probably a good rule, but we have a lot of names that + # only have case differences. 
+ # [[linters-settings.revive.rules]] + # name = "confusing-naming" + + # [[linters-settings.revive.rules]] + # name = "confusing-results" + + [[linters-settings.revive.rules]] + name = "constant-logical-expr" + + [[linters-settings.revive.rules]] + name = "context-as-argument" + + [[linters-settings.revive.rules]] + name = "context-keys-type" + + # [[linters-settings.revive.rules]] + # name = "cyclomatic" + + # [[linters-settings.revive.rules]] + # name = "deep-exit" + + [[linters-settings.revive.rules]] + name = "defer" + + [[linters-settings.revive.rules]] + name = "dot-imports" + + [[linters-settings.revive.rules]] + name = "duplicated-imports" + + [[linters-settings.revive.rules]] + name = "early-return" + + [[linters-settings.revive.rules]] + name = "empty-block" + + [[linters-settings.revive.rules]] + name = "empty-lines" + + [[linters-settings.revive.rules]] + name = "errorf" + + [[linters-settings.revive.rules]] + name = "error-naming" + + [[linters-settings.revive.rules]] + name = "error-return" + + [[linters-settings.revive.rules]] + name = "error-strings" + + [[linters-settings.revive.rules]] + name = "exported" + + # [[linters-settings.revive.rules]] + # name = "file-header" + + # We have a lot of flag parameters. This linter probably makes + # a good point, but we would need some cleanup or a lot of nolints. + # [[linters-settings.revive.rules]] + # name = "flag-parameter" + + # [[linters-settings.revive.rules]] + # name = "function-result-limit" + + [[linters-settings.revive.rules]] + name = "get-return" + + [[linters-settings.revive.rules]] + name = "identical-branches" + + [[linters-settings.revive.rules]] + name = "if-return" + + [[linters-settings.revive.rules]] + name = "imports-blacklist" + + [[linters-settings.revive.rules]] + name = "import-shadowing" + + [[linters-settings.revive.rules]] + name = "increment-decrement" + + [[linters-settings.revive.rules]] + name = "indent-error-flow" + + # [[linters-settings.revive.rules]] + # name = "line-length-limit" + + # [[linters-settings.revive.rules]] + # name = "max-public-structs" + + [[linters-settings.revive.rules]] + name = "modifies-parameter" + + [[linters-settings.revive.rules]] + name = "modifies-value-receiver" + + # We frequently use nested structs, particularly in tests. + # [[linters-settings.revive.rules]] + # name = "nested-structs" + + [[linters-settings.revive.rules]] + name = "optimize-operands-order" + + [[linters-settings.revive.rules]] + name = "package-comments" + + [[linters-settings.revive.rules]] + name = "range" + + [[linters-settings.revive.rules]] + name = "range-val-address" + + [[linters-settings.revive.rules]] + name = "range-val-in-closure" + + [[linters-settings.revive.rules]] + name = "receiver-naming" + + [[linters-settings.revive.rules]] + name = "redefines-builtin-id" + + [[linters-settings.revive.rules]] + name = "string-of-int" + + [[linters-settings.revive.rules]] + name = "struct-tag" + + [[linters-settings.revive.rules]] + name = "superfluous-else" + + [[linters-settings.revive.rules]] + name = "time-naming" + + [[linters-settings.revive.rules]] + name = "unconditional-recursion" + + [[linters-settings.revive.rules]] + name = "unexported-naming" + + [[linters-settings.revive.rules]] + name = "unexported-return" + + # This is covered elsewhere and we want to ignore some + # functions such as fmt.Fprintf. 
+ # [[linters-settings.revive.rules]] + # name = "unhandled-error" + + [[linters-settings.revive.rules]] + name = "unnecessary-stmt" + + [[linters-settings.revive.rules]] + name = "unreachable-code" + + [[linters-settings.revive.rules]] + name = "unused-parameter" + + # We generally have unused receivers in tests for meeting the + # requirements of an interface. + # [[linters-settings.revive.rules]] + # name = "unused-receiver" + + # This probably makes sense after we upgrade to 1.18 + # [[linters-settings.revive.rules]] + # name = "use-any" + + [[linters-settings.revive.rules]] + name = "useless-break" + + [[linters-settings.revive.rules]] + name = "var-declaration" + + [[linters-settings.revive.rules]] + name = "var-naming" + + [[linters-settings.revive.rules]] + name = "waitgroup-by-value" + +[linters-settings.unparam] + check-exported = true + +[[issues.exclude-rules]] + linters = [ + "govet" + ] + # we want to enable almost all govet rules. It is easier to just filter out + # the ones we don't want: + # + # * fieldalignment - way too noisy. Although it is very useful in particular + # cases where we are trying to use as little memory as possible, having + # it go off on every struct isn't helpful. + # * shadow - although often useful, it complains about _many_ err + # shadowing assignments and some others where shadowing is clear. + text = "^(fieldalignment|shadow)" diff --git a/vendor/github.com/oschwald/maxminddb-golang/LICENSE b/vendor/github.com/oschwald/maxminddb-golang/LICENSE new file mode 100644 index 000000000000..2969677f1590 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2015, Gregory J. Oschwald + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/oschwald/maxminddb-golang/README.md b/vendor/github.com/oschwald/maxminddb-golang/README.md new file mode 100644 index 000000000000..9662888bdf9e --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/README.md @@ -0,0 +1,36 @@ +# MaxMind DB Reader for Go # + +[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.svg)](https://godoc.org/github.com/oschwald/maxminddb-golang) + +This is a Go reader for the MaxMind DB format. Although this can be used to +read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and +[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases, +[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level +API for doing so. + +This is not an official MaxMind API. + +## Installation ## + +``` +go get github.com/oschwald/maxminddb-golang +``` + +## Usage ## + +[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for +documentation and examples. + +## Examples ## + +See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or +`example_test.go` for examples. 
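+
+The following is a small, self-contained sketch of the typical flow, shown
+here for convenience rather than taken from `example_test.go`. The database
+path and the choice of decoded fields are assumptions; any MaxMind DB file
+containing the corresponding keys would work:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+
+	"github.com/oschwald/maxminddb-golang"
+)
+
+func main() {
+	// Open memory-maps the database file; Close releases the mapping.
+	db, err := maxminddb.Open("GeoLite2-City.mmdb") // path is an assumption
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// The `maxminddb` struct tags name the keys to decode from the record;
+	// keys that are not listed in the struct are skipped.
+	var record struct {
+		City struct {
+			Names map[string]string `maxminddb:"names"`
+		} `maxminddb:"city"`
+		Location struct {
+			Latitude  float64 `maxminddb:"latitude"`
+			Longitude float64 `maxminddb:"longitude"`
+		} `maxminddb:"location"`
+	}
+
+	ip := net.ParseIP("81.2.69.142")
+	if err := db.Lookup(ip, &record); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(record.City.Names["en"], record.Location.Latitude, record.Location.Longitude)
+}
+```
+
+Decoding into a struct like this uses the reflection-based decoder in this
+package; `example_test.go` in the repository remains the canonical,
+maintained set of examples.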
+ +## Contributing ## + +Contributions welcome! Please fork the repository and open a pull request +with your changes. + +## License ## + +This is free software, licensed under the ISC License. diff --git a/vendor/github.com/oschwald/maxminddb-golang/decoder.go b/vendor/github.com/oschwald/maxminddb-golang/decoder.go new file mode 100644 index 000000000000..828c57ff1e23 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/decoder.go @@ -0,0 +1,897 @@ +package maxminddb + +import ( + "encoding/binary" + "math" + "math/big" + "reflect" + "sync" +) + +type decoder struct { + buffer []byte +} + +type dataType int + +const ( + _Extended dataType = iota + _Pointer + _String + _Float64 + _Bytes + _Uint16 + _Uint32 + _Map + _Int32 + _Uint64 + _Uint128 + _Slice + // We don't use the next two. They are placeholders. See the spec + // for more details. + _Container //nolint: deadcode, varcheck // above + _Marker //nolint: deadcode, varcheck // above + _Bool + _Float32 +) + +const ( + // This is the value used in libmaxminddb. + maximumDataStructureDepth = 512 +) + +func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) { + if depth > maximumDataStructureDepth { + return 0, newInvalidDatabaseError( + "exceeded maximum data structure depth; database is likely corrupt", + ) + } + typeNum, size, newOffset, err := d.decodeCtrlData(offset) + if err != nil { + return 0, err + } + + if typeNum != _Pointer && result.Kind() == reflect.Uintptr { + result.Set(reflect.ValueOf(uintptr(offset))) + return d.nextValueOffset(offset, 1) + } + return d.decodeFromType(typeNum, size, newOffset, result, depth+1) +} + +func (d *decoder) decodeToDeserializer( + offset uint, + dser deserializer, + depth int, + getNext bool, +) (uint, error) { + if depth > maximumDataStructureDepth { + return 0, newInvalidDatabaseError( + "exceeded maximum data structure depth; database is likely corrupt", + ) + } + skip, err := dser.ShouldSkip(uintptr(offset)) + if err != nil { + return 0, err + } + if skip { + if getNext { + return d.nextValueOffset(offset, 1) + } + return 0, nil + } + + typeNum, size, newOffset, err := d.decodeCtrlData(offset) + if err != nil { + return 0, err + } + + return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1) +} + +func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) { + newOffset := offset + 1 + if offset >= uint(len(d.buffer)) { + return 0, 0, 0, newOffsetError() + } + ctrlByte := d.buffer[offset] + + typeNum := dataType(ctrlByte >> 5) + if typeNum == _Extended { + if newOffset >= uint(len(d.buffer)) { + return 0, 0, 0, newOffsetError() + } + typeNum = dataType(d.buffer[newOffset] + 7) + newOffset++ + } + + var size uint + size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum) + return typeNum, size, newOffset, err +} + +func (d *decoder) sizeFromCtrlByte( + ctrlByte byte, + offset uint, + typeNum dataType, +) (uint, uint, error) { + size := uint(ctrlByte & 0x1f) + if typeNum == _Extended { + return size, offset, nil + } + + var bytesToRead uint + if size < 29 { + return size, offset, nil + } + + bytesToRead = size - 28 + newOffset := offset + bytesToRead + if newOffset > uint(len(d.buffer)) { + return 0, 0, newOffsetError() + } + if size == 29 { + return 29 + uint(d.buffer[offset]), offset + 1, nil + } + + sizeBytes := d.buffer[offset:newOffset] + + switch { + case size == 30: + size = 285 + uintFromBytes(0, sizeBytes) + case size > 30: + size = uintFromBytes(0, sizeBytes) + 65821 + } + return size, 
newOffset, nil +} + +func (d *decoder) decodeFromType( + dtype dataType, + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + result = d.indirect(result) + + // For these types, size has a special meaning + switch dtype { + case _Bool: + return d.unmarshalBool(size, offset, result) + case _Map: + return d.unmarshalMap(size, offset, result, depth) + case _Pointer: + return d.unmarshalPointer(size, offset, result, depth) + case _Slice: + return d.unmarshalSlice(size, offset, result, depth) + } + + // For the remaining types, size is the byte size + if offset+size > uint(len(d.buffer)) { + return 0, newOffsetError() + } + switch dtype { + case _Bytes: + return d.unmarshalBytes(size, offset, result) + case _Float32: + return d.unmarshalFloat32(size, offset, result) + case _Float64: + return d.unmarshalFloat64(size, offset, result) + case _Int32: + return d.unmarshalInt32(size, offset, result) + case _String: + return d.unmarshalString(size, offset, result) + case _Uint16: + return d.unmarshalUint(size, offset, result, 16) + case _Uint32: + return d.unmarshalUint(size, offset, result, 32) + case _Uint64: + return d.unmarshalUint(size, offset, result, 64) + case _Uint128: + return d.unmarshalUint128(size, offset, result) + default: + return 0, newInvalidDatabaseError("unknown type: %d", dtype) + } +} + +func (d *decoder) decodeFromTypeToDeserializer( + dtype dataType, + size uint, + offset uint, + dser deserializer, + depth int, +) (uint, error) { + // For these types, size has a special meaning + switch dtype { + case _Bool: + v, offset := d.decodeBool(size, offset) + return offset, dser.Bool(v) + case _Map: + return d.decodeMapToDeserializer(size, offset, dser, depth) + case _Pointer: + pointer, newOffset, err := d.decodePointer(size, offset) + if err != nil { + return 0, err + } + _, err = d.decodeToDeserializer(pointer, dser, depth, false) + return newOffset, err + case _Slice: + return d.decodeSliceToDeserializer(size, offset, dser, depth) + } + + // For the remaining types, size is the byte size + if offset+size > uint(len(d.buffer)) { + return 0, newOffsetError() + } + switch dtype { + case _Bytes: + v, offset := d.decodeBytes(size, offset) + return offset, dser.Bytes(v) + case _Float32: + v, offset := d.decodeFloat32(size, offset) + return offset, dser.Float32(v) + case _Float64: + v, offset := d.decodeFloat64(size, offset) + return offset, dser.Float64(v) + case _Int32: + v, offset := d.decodeInt(size, offset) + return offset, dser.Int32(int32(v)) + case _String: + v, offset := d.decodeString(size, offset) + return offset, dser.String(v) + case _Uint16: + v, offset := d.decodeUint(size, offset) + return offset, dser.Uint16(uint16(v)) + case _Uint32: + v, offset := d.decodeUint(size, offset) + return offset, dser.Uint32(uint32(v)) + case _Uint64: + v, offset := d.decodeUint(size, offset) + return offset, dser.Uint64(v) + case _Uint128: + v, offset := d.decodeUint128(size, offset) + return offset, dser.Uint128(v) + default: + return 0, newInvalidDatabaseError("unknown type: %d", dtype) + } +} + +func (d *decoder) unmarshalBool(size, offset uint, result reflect.Value) (uint, error) { + if size > 1 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (bool size of %v)", + size, + ) + } + value, newOffset := d.decodeBool(size, offset) + + switch result.Kind() { + case reflect.Bool: + result.SetBool(value) + return newOffset, nil + case reflect.Interface: + if result.NumMethod() == 0 { + 
result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +// indirect follows pointers and create values as necessary. This is +// heavily based on encoding/json as my original version had a subtle +// bug. This method should be considered to be licensed under +// https://golang.org/LICENSE +func (d *decoder) indirect(result reflect.Value) reflect.Value { + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if result.Kind() == reflect.Interface && !result.IsNil() { + e := result.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + result = e + continue + } + } + + if result.Kind() != reflect.Ptr { + break + } + + if result.IsNil() { + result.Set(reflect.New(result.Type().Elem())) + } + + result = result.Elem() + } + return result +} + +var sliceType = reflect.TypeOf([]byte{}) + +func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) { + value, newOffset := d.decodeBytes(size, offset) + + switch result.Kind() { + case reflect.Slice: + if result.Type() == sliceType { + result.SetBytes(value) + return newOffset, nil + } + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) { + if size != 4 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (float32 size of %v)", + size, + ) + } + value, newOffset := d.decodeFloat32(size, offset) + + switch result.Kind() { + case reflect.Float32, reflect.Float64: + result.SetFloat(float64(value)) + return newOffset, nil + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) { + if size != 8 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (float 64 size of %v)", + size, + ) + } + value, newOffset := d.decodeFloat64(size, offset) + + switch result.Kind() { + case reflect.Float32, reflect.Float64: + if result.OverflowFloat(value) { + return 0, newUnmarshalTypeError(value, result.Type()) + } + result.SetFloat(value) + return newOffset, nil + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) { + if size > 4 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (int32 size of %v)", + size, + ) + } + value, newOffset := d.decodeInt(size, offset) + + switch result.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if !result.OverflowInt(n) { + result.SetInt(n) + return newOffset, nil + } + case reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64, + reflect.Uintptr: + n := uint64(value) + if !result.OverflowUint(n) { + result.SetUint(n) + return newOffset, nil + } + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, 
newUnmarshalTypeError(value, result.Type()) +} + +func (d *decoder) unmarshalMap( + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + result = d.indirect(result) + switch result.Kind() { + default: + return 0, newUnmarshalTypeError("map", result.Type()) + case reflect.Struct: + return d.decodeStruct(size, offset, result, depth) + case reflect.Map: + return d.decodeMap(size, offset, result, depth) + case reflect.Interface: + if result.NumMethod() == 0 { + rv := reflect.ValueOf(make(map[string]interface{}, size)) + newOffset, err := d.decodeMap(size, offset, rv, depth) + result.Set(rv) + return newOffset, err + } + return 0, newUnmarshalTypeError("map", result.Type()) + } +} + +func (d *decoder) unmarshalPointer( + size, offset uint, + result reflect.Value, + depth int, +) (uint, error) { + pointer, newOffset, err := d.decodePointer(size, offset) + if err != nil { + return 0, err + } + _, err = d.decode(pointer, result, depth) + return newOffset, err +} + +func (d *decoder) unmarshalSlice( + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + switch result.Kind() { + case reflect.Slice: + return d.decodeSlice(size, offset, result, depth) + case reflect.Interface: + if result.NumMethod() == 0 { + a := []interface{}{} + rv := reflect.ValueOf(&a).Elem() + newOffset, err := d.decodeSlice(size, offset, rv, depth) + result.Set(rv) + return newOffset, err + } + } + return 0, newUnmarshalTypeError("array", result.Type()) +} + +func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) { + value, newOffset := d.decodeString(size, offset) + + switch result.Kind() { + case reflect.String: + result.SetString(value) + return newOffset, nil + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +func (d *decoder) unmarshalUint( + size, offset uint, + result reflect.Value, + uintType uint, +) (uint, error) { + if size > uintType/8 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (uint%v size of %v)", + uintType, + size, + ) + } + + value, newOffset := d.decodeUint(size, offset) + + switch result.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if !result.OverflowInt(n) { + result.SetInt(n) + return newOffset, nil + } + case reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64, + reflect.Uintptr: + if !result.OverflowUint(value) { + result.SetUint(value) + return newOffset, nil + } + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) +} + +var bigIntType = reflect.TypeOf(big.Int{}) + +func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) { + if size > 16 { + return 0, newInvalidDatabaseError( + "the MaxMind DB file's data section contains bad data (uint128 size of %v)", + size, + ) + } + value, newOffset := d.decodeUint128(size, offset) + + switch result.Kind() { + case reflect.Struct: + if result.Type() == bigIntType { + result.Set(reflect.ValueOf(*value)) + return newOffset, nil + } + case reflect.Interface: + if result.NumMethod() == 0 { + result.Set(reflect.ValueOf(value)) + return newOffset, nil + } + } + return newOffset, newUnmarshalTypeError(value, result.Type()) 
+} + +func (d *decoder) decodeBool(size, offset uint) (bool, uint) { + return size != 0, offset +} + +func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) { + newOffset := offset + size + bytes := make([]byte, size) + copy(bytes, d.buffer[offset:newOffset]) + return bytes, newOffset +} + +func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) { + newOffset := offset + size + bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset]) + return math.Float64frombits(bits), newOffset +} + +func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) { + newOffset := offset + size + bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset]) + return math.Float32frombits(bits), newOffset +} + +func (d *decoder) decodeInt(size, offset uint) (int, uint) { + newOffset := offset + size + var val int32 + for _, b := range d.buffer[offset:newOffset] { + val = (val << 8) | int32(b) + } + return int(val), newOffset +} + +func (d *decoder) decodeMap( + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + if result.IsNil() { + result.Set(reflect.MakeMapWithSize(result.Type(), int(size))) + } + + mapType := result.Type() + keyValue := reflect.New(mapType.Key()).Elem() + elemType := mapType.Elem() + elemKind := elemType.Kind() + var elemValue reflect.Value + for i := uint(0); i < size; i++ { + var key []byte + var err error + key, offset, err = d.decodeKey(offset) + + if err != nil { + return 0, err + } + + if !elemValue.IsValid() || elemKind == reflect.Interface { + elemValue = reflect.New(elemType).Elem() + } + + offset, err = d.decode(offset, elemValue, depth) + if err != nil { + return 0, err + } + + keyValue.SetString(string(key)) + result.SetMapIndex(keyValue, elemValue) + } + return offset, nil +} + +func (d *decoder) decodeMapToDeserializer( + size uint, + offset uint, + dser deserializer, + depth int, +) (uint, error) { + err := dser.StartMap(size) + if err != nil { + return 0, err + } + for i := uint(0); i < size; i++ { + // TODO - implement key/value skipping? 
+ offset, err = d.decodeToDeserializer(offset, dser, depth, true) + if err != nil { + return 0, err + } + + offset, err = d.decodeToDeserializer(offset, dser, depth, true) + if err != nil { + return 0, err + } + } + err = dser.End() + if err != nil { + return 0, err + } + return offset, nil +} + +func (d *decoder) decodePointer( + size uint, + offset uint, +) (uint, uint, error) { + pointerSize := ((size >> 3) & 0x3) + 1 + newOffset := offset + pointerSize + if newOffset > uint(len(d.buffer)) { + return 0, 0, newOffsetError() + } + pointerBytes := d.buffer[offset:newOffset] + var prefix uint + if pointerSize == 4 { + prefix = 0 + } else { + prefix = size & 0x7 + } + unpacked := uintFromBytes(prefix, pointerBytes) + + var pointerValueOffset uint + switch pointerSize { + case 1: + pointerValueOffset = 0 + case 2: + pointerValueOffset = 2048 + case 3: + pointerValueOffset = 526336 + case 4: + pointerValueOffset = 0 + } + + pointer := unpacked + pointerValueOffset + + return pointer, newOffset, nil +} + +func (d *decoder) decodeSlice( + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + result.Set(reflect.MakeSlice(result.Type(), int(size), int(size))) + for i := 0; i < int(size); i++ { + var err error + offset, err = d.decode(offset, result.Index(i), depth) + if err != nil { + return 0, err + } + } + return offset, nil +} + +func (d *decoder) decodeSliceToDeserializer( + size uint, + offset uint, + dser deserializer, + depth int, +) (uint, error) { + err := dser.StartSlice(size) + if err != nil { + return 0, err + } + for i := uint(0); i < size; i++ { + offset, err = d.decodeToDeserializer(offset, dser, depth, true) + if err != nil { + return 0, err + } + } + err = dser.End() + if err != nil { + return 0, err + } + return offset, nil +} + +func (d *decoder) decodeString(size, offset uint) (string, uint) { + newOffset := offset + size + return string(d.buffer[offset:newOffset]), newOffset +} + +func (d *decoder) decodeStruct( + size uint, + offset uint, + result reflect.Value, + depth int, +) (uint, error) { + fields := cachedFields(result) + + // This fills in embedded structs + for _, i := range fields.anonymousFields { + _, err := d.unmarshalMap(size, offset, result.Field(i), depth) + if err != nil { + return 0, err + } + } + + // This handles named fields + for i := uint(0); i < size; i++ { + var ( + err error + key []byte + ) + key, offset, err = d.decodeKey(offset) + if err != nil { + return 0, err + } + // The string() does not create a copy due to this compiler + // optimization: https://github.com/golang/go/issues/3512 + j, ok := fields.namedFields[string(key)] + if !ok { + offset, err = d.nextValueOffset(offset, 1) + if err != nil { + return 0, err + } + continue + } + + offset, err = d.decode(offset, result.Field(j), depth) + if err != nil { + return 0, err + } + } + return offset, nil +} + +type fieldsType struct { + namedFields map[string]int + anonymousFields []int +} + +var fieldsMap sync.Map + +func cachedFields(result reflect.Value) *fieldsType { + resultType := result.Type() + + if fields, ok := fieldsMap.Load(resultType); ok { + return fields.(*fieldsType) + } + numFields := resultType.NumField() + namedFields := make(map[string]int, numFields) + var anonymous []int + for i := 0; i < numFields; i++ { + field := resultType.Field(i) + + fieldName := field.Name + if tag := field.Tag.Get("maxminddb"); tag != "" { + if tag == "-" { + continue + } + fieldName = tag + } + if field.Anonymous { + anonymous = append(anonymous, i) + continue + } + 
namedFields[fieldName] = i + } + fields := &fieldsType{namedFields, anonymous} + fieldsMap.Store(resultType, fields) + + return fields +} + +func (d *decoder) decodeUint(size, offset uint) (uint64, uint) { + newOffset := offset + size + bytes := d.buffer[offset:newOffset] + + var val uint64 + for _, b := range bytes { + val = (val << 8) | uint64(b) + } + return val, newOffset +} + +func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) { + newOffset := offset + size + val := new(big.Int) + val.SetBytes(d.buffer[offset:newOffset]) + + return val, newOffset +} + +func uintFromBytes(prefix uint, uintBytes []byte) uint { + val := prefix + for _, b := range uintBytes { + val = (val << 8) | uint(b) + } + return val +} + +// decodeKey decodes a map key into []byte slice. We use a []byte so that we +// can take advantage of https://github.com/golang/go/issues/3512 to avoid +// copying the bytes when decoding a struct. Previously, we achieved this by +// using unsafe. +func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) { + typeNum, size, dataOffset, err := d.decodeCtrlData(offset) + if err != nil { + return nil, 0, err + } + if typeNum == _Pointer { + pointer, ptrOffset, err := d.decodePointer(size, dataOffset) + if err != nil { + return nil, 0, err + } + key, _, err := d.decodeKey(pointer) + return key, ptrOffset, err + } + if typeNum != _String { + return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum) + } + newOffset := dataOffset + size + if newOffset > uint(len(d.buffer)) { + return nil, 0, newOffsetError() + } + return d.buffer[dataOffset:newOffset], newOffset, nil +} + +// This function is used to skip ahead to the next value without decoding +// the one at the offset passed in. The size bits have different meanings for +// different data types. +func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) { + if numberToSkip == 0 { + return offset, nil + } + typeNum, size, offset, err := d.decodeCtrlData(offset) + if err != nil { + return 0, err + } + switch typeNum { + case _Pointer: + _, offset, err = d.decodePointer(size, offset) + if err != nil { + return 0, err + } + case _Map: + numberToSkip += 2 * size + case _Slice: + numberToSkip += size + case _Bool: + default: + offset += size + } + return d.nextValueOffset(offset, numberToSkip-1) +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/deserializer.go b/vendor/github.com/oschwald/maxminddb-golang/deserializer.go new file mode 100644 index 000000000000..c6dd68d14ceb --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/deserializer.go @@ -0,0 +1,31 @@ +package maxminddb + +import "math/big" + +// deserializer is an interface for a type that deserializes an MaxMind DB +// data record to some other type. This exists as an alternative to the +// standard reflection API. +// +// This is fundamentally different than the Unmarshaler interface that +// several packages provide. A Deserializer will generally create the +// final struct or value rather than unmarshaling to itself. +// +// This interface and the associated unmarshaling code is EXPERIMENTAL! +// It is not currently covered by any Semantic Versioning guarantees. +// Use at your own risk. 
+type deserializer interface { + ShouldSkip(offset uintptr) (bool, error) + StartSlice(size uint) error + StartMap(size uint) error + End() error + String(string) error + Float64(float64) error + Bytes([]byte) error + Uint16(uint16) error + Uint32(uint32) error + Int32(int32) error + Uint64(uint64) error + Uint128(*big.Int) error + Bool(bool) error + Float32(float32) error +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/errors.go b/vendor/github.com/oschwald/maxminddb-golang/errors.go new file mode 100644 index 000000000000..132780019bb3 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/errors.go @@ -0,0 +1,42 @@ +package maxminddb + +import ( + "fmt" + "reflect" +) + +// InvalidDatabaseError is returned when the database contains invalid data +// and cannot be parsed. +type InvalidDatabaseError struct { + message string +} + +func newOffsetError() InvalidDatabaseError { + return InvalidDatabaseError{"unexpected end of database"} +} + +func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError { + return InvalidDatabaseError{fmt.Sprintf(format, args...)} +} + +func (e InvalidDatabaseError) Error() string { + return e.message +} + +// UnmarshalTypeError is returned when the value in the database cannot be +// assigned to the specified data type. +type UnmarshalTypeError struct { + Value string // stringified copy of the database value that caused the error + Type reflect.Type // type of the value that could not be assign to +} + +func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError { + return UnmarshalTypeError{ + Value: fmt.Sprintf("%v", value), + Type: rType, + } +} + +func (e UnmarshalTypeError) Error() string { + return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String()) +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go new file mode 100644 index 000000000000..eeb2e05defe1 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go @@ -0,0 +1,16 @@ +//go:build !windows && !appengine && !plan9 +// +build !windows,!appengine,!plan9 + +package maxminddb + +import ( + "golang.org/x/sys/unix" +) + +func mmap(fd, length int) (data []byte, err error) { + return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED) +} + +func munmap(b []byte) (err error) { + return unix.Munmap(b) +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go new file mode 100644 index 000000000000..661250eca00b --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go @@ -0,0 +1,85 @@ +// +build windows,!appengine + +package maxminddb + +// Windows support largely borrowed from mmap-go. +// +// Copyright 2011 Evan Shaw. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +import ( + "errors" + "os" + "reflect" + "sync" + "unsafe" + + "golang.org/x/sys/windows" +) + +type memoryMap []byte + +// Windows +var handleLock sync.Mutex +var handleMap = map[uintptr]windows.Handle{} + +func mmap(fd int, length int) (data []byte, err error) { + h, errno := windows.CreateFileMapping(windows.Handle(fd), nil, + uint32(windows.PAGE_READONLY), 0, uint32(length), nil) + if h == 0 { + return nil, os.NewSyscallError("CreateFileMapping", errno) + } + + addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0, + 0, uintptr(length)) + if addr == 0 { + return nil, os.NewSyscallError("MapViewOfFile", errno) + } + handleLock.Lock() + handleMap[addr] = h + handleLock.Unlock() + + m := memoryMap{} + dh := m.header() + dh.Data = addr + dh.Len = length + dh.Cap = dh.Len + + return m, nil +} + +func (m *memoryMap) header() *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(m)) +} + +func flush(addr, len uintptr) error { + errno := windows.FlushViewOfFile(addr, len) + return os.NewSyscallError("FlushViewOfFile", errno) +} + +func munmap(b []byte) (err error) { + m := memoryMap(b) + dh := m.header() + + addr := dh.Data + length := uintptr(dh.Len) + + flush(addr, length) + err = windows.UnmapViewOfFile(addr) + if err != nil { + return err + } + + handleLock.Lock() + defer handleLock.Unlock() + handle, ok := handleMap[addr] + if !ok { + // should be impossible; we would've errored above + return errors.New("unknown base address") + } + delete(handleMap, addr) + + e := windows.CloseHandle(windows.Handle(handle)) + return os.NewSyscallError("CloseHandle", e) +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/node.go b/vendor/github.com/oschwald/maxminddb-golang/node.go new file mode 100644 index 000000000000..16e8b5f6a0f9 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/node.go @@ -0,0 +1,58 @@ +package maxminddb + +type nodeReader interface { + readLeft(uint) uint + readRight(uint) uint +} + +type nodeReader24 struct { + buffer []byte +} + +func (n nodeReader24) readLeft(nodeNumber uint) uint { + return (uint(n.buffer[nodeNumber]) << 16) | + (uint(n.buffer[nodeNumber+1]) << 8) | + uint(n.buffer[nodeNumber+2]) +} + +func (n nodeReader24) readRight(nodeNumber uint) uint { + return (uint(n.buffer[nodeNumber+3]) << 16) | + (uint(n.buffer[nodeNumber+4]) << 8) | + uint(n.buffer[nodeNumber+5]) +} + +type nodeReader28 struct { + buffer []byte +} + +func (n nodeReader28) readLeft(nodeNumber uint) uint { + return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) | + (uint(n.buffer[nodeNumber]) << 16) | + (uint(n.buffer[nodeNumber+1]) << 8) | + uint(n.buffer[nodeNumber+2]) +} + +func (n nodeReader28) readRight(nodeNumber uint) uint { + return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) | + (uint(n.buffer[nodeNumber+4]) << 16) | + (uint(n.buffer[nodeNumber+5]) << 8) | + uint(n.buffer[nodeNumber+6]) +} + +type nodeReader32 struct { + buffer []byte +} + +func (n nodeReader32) readLeft(nodeNumber uint) uint { + return (uint(n.buffer[nodeNumber]) << 24) | + (uint(n.buffer[nodeNumber+1]) << 16) | + (uint(n.buffer[nodeNumber+2]) << 8) | + uint(n.buffer[nodeNumber+3]) +} + +func (n nodeReader32) readRight(nodeNumber uint) uint { + return (uint(n.buffer[nodeNumber+4]) << 24) | + (uint(n.buffer[nodeNumber+5]) << 16) | + (uint(n.buffer[nodeNumber+6]) << 8) | + uint(n.buffer[nodeNumber+7]) +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader.go b/vendor/github.com/oschwald/maxminddb-golang/reader.go new file mode 100644 index 
000000000000..263cf64a0fff --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/reader.go @@ -0,0 +1,310 @@ +// Package maxminddb provides a reader for the MaxMind DB file format. +package maxminddb + +import ( + "bytes" + "errors" + "fmt" + "net" + "reflect" +) + +const ( + // NotFound is returned by LookupOffset when a matched root record offset + // cannot be found. + NotFound = ^uintptr(0) + + dataSectionSeparatorSize = 16 +) + +var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com") + +// Reader holds the data corresponding to the MaxMind DB file. Its only public +// field is Metadata, which contains the metadata from the MaxMind DB file. +// +// All of the methods on Reader are thread-safe. The struct may be safely +// shared across goroutines. +type Reader struct { + hasMappedFile bool + buffer []byte + nodeReader nodeReader + decoder decoder + Metadata Metadata + ipv4Start uint + ipv4StartBitDepth int + nodeOffsetMult uint +} + +// Metadata holds the metadata decoded from the MaxMind DB file. In particular +// it has the format version, the build time as Unix epoch time, the database +// type and description, the IP version supported, and a slice of the natural +// languages included. +type Metadata struct { + BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"` + BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"` + BuildEpoch uint `maxminddb:"build_epoch"` + DatabaseType string `maxminddb:"database_type"` + Description map[string]string `maxminddb:"description"` + IPVersion uint `maxminddb:"ip_version"` + Languages []string `maxminddb:"languages"` + NodeCount uint `maxminddb:"node_count"` + RecordSize uint `maxminddb:"record_size"` +} + +// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns +// a Reader structure or an error. 
+func FromBytes(buffer []byte) (*Reader, error) { + metadataStart := bytes.LastIndex(buffer, metadataStartMarker) + + if metadataStart == -1 { + return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file") + } + + metadataStart += len(metadataStartMarker) + metadataDecoder := decoder{buffer[metadataStart:]} + + var metadata Metadata + + rvMetdata := reflect.ValueOf(&metadata) + _, err := metadataDecoder.decode(0, rvMetdata, 0) + if err != nil { + return nil, err + } + + searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4 + dataSectionStart := searchTreeSize + dataSectionSeparatorSize + dataSectionEnd := uint(metadataStart - len(metadataStartMarker)) + if dataSectionStart > dataSectionEnd { + return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata") + } + d := decoder{ + buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)], + } + + nodeBuffer := buffer[:searchTreeSize] + var nodeReader nodeReader + switch metadata.RecordSize { + case 24: + nodeReader = nodeReader24{buffer: nodeBuffer} + case 28: + nodeReader = nodeReader28{buffer: nodeBuffer} + case 32: + nodeReader = nodeReader32{buffer: nodeBuffer} + default: + return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize) + } + + reader := &Reader{ + buffer: buffer, + nodeReader: nodeReader, + decoder: d, + Metadata: metadata, + ipv4Start: 0, + nodeOffsetMult: metadata.RecordSize / 4, + } + + reader.setIPv4Start() + + return reader, err +} + +func (r *Reader) setIPv4Start() { + if r.Metadata.IPVersion != 6 { + return + } + + nodeCount := r.Metadata.NodeCount + + node := uint(0) + i := 0 + for ; i < 96 && node < nodeCount; i++ { + node = r.nodeReader.readLeft(node * r.nodeOffsetMult) + } + r.ipv4Start = node + r.ipv4StartBitDepth = i +} + +// Lookup retrieves the database record for ip and stores it in the value +// pointed to by result. If result is nil or not a pointer, an error is +// returned. If the data in the database record cannot be stored in result +// because of type differences, an UnmarshalTypeError is returned. If the +// database is invalid or otherwise cannot be read, an InvalidDatabaseError +// is returned. +func (r *Reader) Lookup(ip net.IP, result interface{}) error { + if r.buffer == nil { + return errors.New("cannot call Lookup on a closed database") + } + pointer, _, _, err := r.lookupPointer(ip) + if pointer == 0 || err != nil { + return err + } + return r.retrieveData(pointer, result) +} + +// LookupNetwork retrieves the database record for ip and stores it in the +// value pointed to by result. The network returned is the network associated +// with the data record in the database. The ok return value indicates whether +// the database contained a record for the ip. +// +// If result is nil or not a pointer, an error is returned. If the data in the +// database record cannot be stored in result because of type differences, an +// UnmarshalTypeError is returned. If the database is invalid or otherwise +// cannot be read, an InvalidDatabaseError is returned. 
+func (r *Reader) LookupNetwork( + ip net.IP, + result interface{}, +) (network *net.IPNet, ok bool, err error) { + if r.buffer == nil { + return nil, false, errors.New("cannot call Lookup on a closed database") + } + pointer, prefixLength, ip, err := r.lookupPointer(ip) + + network = r.cidr(ip, prefixLength) + if pointer == 0 || err != nil { + return network, false, err + } + + return network, true, r.retrieveData(pointer, result) +} + +// LookupOffset maps an argument net.IP to a corresponding record offset in the +// database. NotFound is returned if no such record is found, and a record may +// otherwise be extracted by passing the returned offset to Decode. LookupOffset +// is an advanced API, which exists to provide clients with a means to cache +// previously-decoded records. +func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) { + if r.buffer == nil { + return 0, errors.New("cannot call LookupOffset on a closed database") + } + pointer, _, _, err := r.lookupPointer(ip) + if pointer == 0 || err != nil { + return NotFound, err + } + return r.resolveDataPointer(pointer) +} + +func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet { + // This is necessary as the node that the IPv4 start is at may + // be at a bit depth that is less that 96, i.e., ipv4Start points + // to a leaf node. For instance, if a record was inserted at ::/8, + // the ipv4Start would point directly at the leaf node for the + // record and would have a bit depth of 8. This would not happen + // with databases currently distributed by MaxMind as all of them + // have an IPv4 subtree that is greater than a single node. + if r.Metadata.IPVersion == 6 && + len(ip) == net.IPv4len && + r.ipv4StartBitDepth != 96 { + return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)} + } + + mask := net.CIDRMask(prefixLength, len(ip)*8) + return &net.IPNet{IP: ip.Mask(mask), Mask: mask} +} + +// Decode the record at |offset| into |result|. The result value pointed to +// must be a data value that corresponds to a record in the database. This may +// include a struct representation of the data, a map capable of holding the +// data or an empty interface{} value. +// +// If result is a pointer to a struct, the struct need not include a field +// for every value that may be in the database. If a field is not present in +// the structure, the decoder will not decode that field, reducing the time +// required to decode the record. +// +// As a special case, a struct field of type uintptr will be used to capture +// the offset of the value. Decode may later be used to extract the stored +// value from the offset. MaxMind DBs are highly normalized: for example in +// the City database, all records of the same country will reference a +// single representative record for that country. This uintptr behavior allows +// clients to leverage this normalization in their own sub-record caching. 
+func (r *Reader) Decode(offset uintptr, result interface{}) error { + if r.buffer == nil { + return errors.New("cannot call Decode on a closed database") + } + return r.decode(offset, result) +} + +func (r *Reader) decode(offset uintptr, result interface{}) error { + rv := reflect.ValueOf(result) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return errors.New("result param must be a pointer") + } + + if dser, ok := result.(deserializer); ok { + _, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false) + return err + } + + _, err := r.decoder.decode(uint(offset), rv, 0) + return err +} + +func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) { + if ip == nil { + return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil") + } + + ipV4Address := ip.To4() + if ipV4Address != nil { + ip = ipV4Address + } + if len(ip) == 16 && r.Metadata.IPVersion == 4 { + return 0, 0, ip, fmt.Errorf( + "error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", + ip.String(), + ) + } + + bitCount := uint(len(ip) * 8) + + var node uint + if bitCount == 32 { + node = r.ipv4Start + } + node, prefixLength := r.traverseTree(ip, node, bitCount) + + nodeCount := r.Metadata.NodeCount + if node == nodeCount { + // Record is empty + return 0, prefixLength, ip, nil + } else if node > nodeCount { + return node, prefixLength, ip, nil + } + + return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree") +} + +func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) { + nodeCount := r.Metadata.NodeCount + + i := uint(0) + for ; i < bitCount && node < nodeCount; i++ { + bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8))) + + offset := node * r.nodeOffsetMult + if bit == 0 { + node = r.nodeReader.readLeft(offset) + } else { + node = r.nodeReader.readRight(offset) + } + } + + return node, int(i) +} + +func (r *Reader) retrieveData(pointer uint, result interface{}) error { + offset, err := r.resolveDataPointer(pointer) + if err != nil { + return err + } + return r.decode(offset, result) +} + +func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) { + resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize) + + if resolved >= uintptr(len(r.buffer)) { + return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt") + } + return resolved, nil +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go new file mode 100644 index 000000000000..c6385d8001b8 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go @@ -0,0 +1,28 @@ +// +build appengine plan9 + +package maxminddb + +import "io/ioutil" + +// Open takes a string path to a MaxMind DB file and returns a Reader +// structure or an error. The database file is opened using a memory map, +// except on Google App Engine where mmap is not supported; there the database +// is loaded into memory. Use the Close method on the Reader object to return +// the resources to the system. +func Open(file string) (*Reader, error) { + bytes, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + return FromBytes(bytes) +} + +// Close unmaps the database file from virtual memory and returns the +// resources to the system. If called on a Reader opened using FromBytes +// or Open on Google App Engine, this method sets the underlying buffer +// to nil, returning the resources to the system. 
+func (r *Reader) Close() error { + r.buffer = nil + return nil +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go new file mode 100644 index 000000000000..0ed9de10ff8d --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go @@ -0,0 +1,66 @@ +//go:build !appengine && !plan9 +// +build !appengine,!plan9 + +package maxminddb + +import ( + "os" + "runtime" +) + +// Open takes a string path to a MaxMind DB file and returns a Reader +// structure or an error. The database file is opened using a memory map, +// except on Google App Engine where mmap is not supported; there the database +// is loaded into memory. Use the Close method on the Reader object to return +// the resources to the system. +func Open(file string) (*Reader, error) { + mapFile, err := os.Open(file) + if err != nil { + _ = mapFile.Close() + return nil, err + } + + stats, err := mapFile.Stat() + if err != nil { + _ = mapFile.Close() + return nil, err + } + + fileSize := int(stats.Size()) + mmap, err := mmap(int(mapFile.Fd()), fileSize) + if err != nil { + _ = mapFile.Close() + return nil, err + } + + if err := mapFile.Close(); err != nil { + //nolint:errcheck // we prefer to return the original error + munmap(mmap) + return nil, err + } + + reader, err := FromBytes(mmap) + if err != nil { + //nolint:errcheck // we prefer to return the original error + munmap(mmap) + return nil, err + } + + reader.hasMappedFile = true + runtime.SetFinalizer(reader, (*Reader).Close) + return reader, nil +} + +// Close unmaps the database file from virtual memory and returns the +// resources to the system. If called on a Reader opened using FromBytes +// or Open on Google App Engine, this method does nothing. +func (r *Reader) Close() error { + var err error + if r.hasMappedFile { + runtime.SetFinalizer(r, nil) + r.hasMappedFile = false + err = munmap(r.buffer) + } + r.buffer = nil + return err +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/traverse.go b/vendor/github.com/oschwald/maxminddb-golang/traverse.go new file mode 100644 index 000000000000..7009ec1466b5 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/traverse.go @@ -0,0 +1,205 @@ +package maxminddb + +import ( + "fmt" + "net" +) + +// Internal structure used to keep track of nodes we still need to visit. +type netNode struct { + ip net.IP + bit uint + pointer uint +} + +// Networks represents a set of subnets that we are iterating over. +type Networks struct { + reader *Reader + nodes []netNode // Nodes we still have to visit. + lastNode netNode + err error + + skipAliasedNetworks bool +} + +var ( + allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)} + allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)} +) + +// NetworksOption are options for Networks and NetworksWithin. +type NetworksOption func(*Networks) + +// SkipAliasedNetworks is an option for Networks and NetworksWithin that +// makes them not iterate over aliases of the IPv4 subtree in an IPv6 +// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16. +// +// You most likely want to set this. The only reason it isn't the default +// behavior is to provide backwards compatibility to existing users. +func SkipAliasedNetworks(networks *Networks) { + networks.skipAliasedNetworks = true +} + +// Networks returns an iterator that can be used to traverse all networks in +// the database. 
+// +// Please note that a MaxMind DB may map IPv4 networks into several locations +// in an IPv6 database. This iterator will iterate over all of these locations +// separately. To only iterate over the IPv4 networks once, use the +// SkipAliasedNetworks option. +func (r *Reader) Networks(options ...NetworksOption) *Networks { + var networks *Networks + if r.Metadata.IPVersion == 6 { + networks = r.NetworksWithin(allIPv6, options...) + } else { + networks = r.NetworksWithin(allIPv4, options...) + } + + return networks +} + +// NetworksWithin returns an iterator that can be used to traverse all networks +// in the database which are contained in a given network. +// +// Please note that a MaxMind DB may map IPv4 networks into several locations +// in an IPv6 database. This iterator will iterate over all of these locations +// separately. To only iterate over the IPv4 networks once, use the +// SkipAliasedNetworks option. +// +// If the provided network is contained within a network in the database, the +// iterator will iterate over exactly one network, the containing network. +func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks { + if r.Metadata.IPVersion == 4 && network.IP.To4() == nil { + return &Networks{ + err: fmt.Errorf( + "error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database", + network.String(), + ), + } + } + + networks := &Networks{reader: r} + for _, option := range options { + option(networks) + } + + ip := network.IP + prefixLength, _ := network.Mask.Size() + + if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len { + if networks.skipAliasedNetworks { + ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]} + } else { + ip = ip.To16() + } + prefixLength += 96 + } + + pointer, bit := r.traverseTree(ip, 0, uint(prefixLength)) + networks.nodes = []netNode{ + { + ip: ip, + bit: uint(bit), + pointer: pointer, + }, + } + + return networks +} + +// Next prepares the next network for reading with the Network method. It +// returns true if there is another network to be processed and false if there +// are no more networks or if there is an error. +func (n *Networks) Next() bool { + if n.err != nil { + return false + } + for len(n.nodes) > 0 { + node := n.nodes[len(n.nodes)-1] + n.nodes = n.nodes[:len(n.nodes)-1] + + for node.pointer != n.reader.Metadata.NodeCount { + // This skips IPv4 aliases without hardcoding the networks that the writer + // currently aliases. + if n.skipAliasedNetworks && n.reader.ipv4Start != 0 && + node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) { + break + } + + if node.pointer > n.reader.Metadata.NodeCount { + n.lastNode = node + return true + } + ipRight := make(net.IP, len(node.ip)) + copy(ipRight, node.ip) + if len(ipRight) <= int(node.bit>>3) { + n.err = newInvalidDatabaseError( + "invalid search tree at %v/%v", ipRight, node.bit) + return false + } + ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8)) + + offset := node.pointer * n.reader.nodeOffsetMult + rightPointer := n.reader.nodeReader.readRight(offset) + + node.bit++ + n.nodes = append(n.nodes, netNode{ + pointer: rightPointer, + ip: ipRight, + bit: node.bit, + }) + + node.pointer = n.reader.nodeReader.readLeft(offset) + } + } + + return false +} + +// Network returns the current network or an error if there is a problem +// decoding the data for the network. It takes a pointer to a result value to +// decode the network's data into. 
+func (n *Networks) Network(result interface{}) (*net.IPNet, error) { + if n.err != nil { + return nil, n.err + } + if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil { + return nil, err + } + + ip := n.lastNode.ip + prefixLength := int(n.lastNode.bit) + + // We do this because uses of SkipAliasedNetworks expect the IPv4 networks + // to be returned as IPv4 networks. If we are not skipping aliased + // networks, then the user will get IPv4 networks from the ::FFFF:0:0/96 + // network as Go automatically converts those. + if n.skipAliasedNetworks && isInIPv4Subtree(ip) { + ip = ip[12:] + prefixLength -= 96 + } + + return &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(prefixLength, len(ip)*8), + }, nil +} + +// Err returns an error, if any, that was encountered during iteration. +func (n *Networks) Err() error { + return n.err +} + +// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's +// IPv4 subtree. +func isInIPv4Subtree(ip net.IP) bool { + if len(ip) != 16 { + return false + } + for i := 0; i < 12; i++ { + if ip[i] != 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/oschwald/maxminddb-golang/verifier.go b/vendor/github.com/oschwald/maxminddb-golang/verifier.go new file mode 100644 index 000000000000..88381d742f87 --- /dev/null +++ b/vendor/github.com/oschwald/maxminddb-golang/verifier.go @@ -0,0 +1,201 @@ +package maxminddb + +import ( + "reflect" + "runtime" +) + +type verifier struct { + reader *Reader +} + +// Verify checks that the database is valid. It validates the search tree, +// the data section, and the metadata section. This verifier is stricter than +// the specification and may return errors on databases that are readable. +func (r *Reader) Verify() error { + v := verifier{r} + if err := v.verifyMetadata(); err != nil { + return err + } + + err := v.verifyDatabase() + runtime.KeepAlive(v.reader) + return err +} + +func (v *verifier) verifyMetadata() error { + metadata := v.reader.Metadata + + if metadata.BinaryFormatMajorVersion != 2 { + return testError( + "binary_format_major_version", + 2, + metadata.BinaryFormatMajorVersion, + ) + } + + if metadata.BinaryFormatMinorVersion != 0 { + return testError( + "binary_format_minor_version", + 0, + metadata.BinaryFormatMinorVersion, + ) + } + + if metadata.DatabaseType == "" { + return testError( + "database_type", + "non-empty string", + metadata.DatabaseType, + ) + } + + if len(metadata.Description) == 0 { + return testError( + "description", + "non-empty slice", + metadata.Description, + ) + } + + if metadata.IPVersion != 4 && metadata.IPVersion != 6 { + return testError( + "ip_version", + "4 or 6", + metadata.IPVersion, + ) + } + + if metadata.RecordSize != 24 && + metadata.RecordSize != 28 && + metadata.RecordSize != 32 { + return testError( + "record_size", + "24, 28, or 32", + metadata.RecordSize, + ) + } + + if metadata.NodeCount == 0 { + return testError( + "node_count", + "positive integer", + metadata.NodeCount, + ) + } + return nil +} + +func (v *verifier) verifyDatabase() error { + offsets, err := v.verifySearchTree() + if err != nil { + return err + } + + if err := v.verifyDataSectionSeparator(); err != nil { + return err + } + + return v.verifyDataSection(offsets) +} + +func (v *verifier) verifySearchTree() (map[uint]bool, error) { + offsets := make(map[uint]bool) + + it := v.reader.Networks() + for it.Next() { + offset, err := v.reader.resolveDataPointer(it.lastNode.pointer) + if err != nil { + return nil, err + } + offsets[uint(offset)] = true + 
} + if err := it.Err(); err != nil { + return nil, err + } + return offsets, nil +} + +func (v *verifier) verifyDataSectionSeparator() error { + separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4 + + separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize] + + for _, b := range separator { + if b != 0 { + return newInvalidDatabaseError("unexpected byte in data separator: %v", separator) + } + } + return nil +} + +func (v *verifier) verifyDataSection(offsets map[uint]bool) error { + pointerCount := len(offsets) + + decoder := v.reader.decoder + + var offset uint + bufferLen := uint(len(decoder.buffer)) + for offset < bufferLen { + var data interface{} + rv := reflect.ValueOf(&data) + newOffset, err := decoder.decode(offset, rv, 0) + if err != nil { + return newInvalidDatabaseError( + "received decoding error (%v) at offset of %v", + err, + offset, + ) + } + if newOffset <= offset { + return newInvalidDatabaseError( + "data section offset unexpectedly went from %v to %v", + offset, + newOffset, + ) + } + + pointer := offset + + if _, ok := offsets[pointer]; !ok { + return newInvalidDatabaseError( + "found data (%v) at %v that the search tree does not point to", + data, + pointer, + ) + } + delete(offsets, pointer) + + offset = newOffset + } + + if offset != bufferLen { + return newInvalidDatabaseError( + "unexpected data at the end of the data section (last offset: %v, end: %v)", + offset, + bufferLen, + ) + } + + if len(offsets) != 0 { + return newInvalidDatabaseError( + "found %v pointers (of %v) in the search tree that we did not see in the data section", + len(offsets), + pointerCount, + ) + } + return nil +} + +func testError( + field string, + expected interface{}, + actual interface{}, +) error { + return newInvalidDatabaseError( + "%v - Expected: %v Actual: %v", + field, + expected, + actual, + ) +} diff --git a/vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE b/vendor/github.com/pkg/browser/LICENSE similarity index 60% rename from vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE rename to vendor/github.com/pkg/browser/LICENSE index b0ab8921dc3b..65f78fb62910 100644 --- a/vendor/go4.org/unsafe/assume-no-moving-gc/LICENSE +++ b/vendor/github.com/pkg/browser/LICENSE @@ -1,21 +1,15 @@ -BSD 3-Clause License - -Copyright (c) 2020, Brad Fitzpatrick +Copyright (c) 2014, Dave Cheney All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md new file mode 100644 index 000000000000..72b1976e3035 --- /dev/null +++ b/vendor/github.com/pkg/browser/README.md @@ -0,0 +1,55 @@ + +# browser + import "github.com/pkg/browser" + +Package browser provides helpers to open files, readers, and urls in a browser window. + +The choice of which browser is started is entirely client dependant. + + + + + +## Variables +``` go +var Stderr io.Writer = os.Stderr +``` +Stderr is the io.Writer to which executed commands write standard error. + +``` go +var Stdout io.Writer = os.Stdout +``` +Stdout is the io.Writer to which executed commands write standard output. + + +## func OpenFile +``` go +func OpenFile(path string) error +``` +OpenFile opens new browser window for the file path. + + +## func OpenReader +``` go +func OpenReader(r io.Reader) error +``` +OpenReader consumes the contents of r and presents the +results in a new browser window. + + +## func OpenURL +``` go +func OpenURL(url string) error +``` +OpenURL opens a new browser window pointing to url. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go new file mode 100644 index 000000000000..d7969d74d80d --- /dev/null +++ b/vendor/github.com/pkg/browser/browser.go @@ -0,0 +1,57 @@ +// Package browser provides helpers to open files, readers, and urls in a browser window. +// +// The choice of which browser is started is entirely client dependant. +package browser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +// Stdout is the io.Writer to which executed commands write standard output. +var Stdout io.Writer = os.Stdout + +// Stderr is the io.Writer to which executed commands write standard error. +var Stderr io.Writer = os.Stderr + +// OpenFile opens new browser window for the file path. +func OpenFile(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return OpenURL("file://" + path) +} + +// OpenReader consumes the contents of r and presents the +// results in a new browser window. +func OpenReader(r io.Reader) error { + f, err := ioutil.TempFile("", "browser.*.html") + if err != nil { + return fmt.Errorf("browser: could not create temporary file: %v", err) + } + if _, err := io.Copy(f, r); err != nil { + f.Close() + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + if err := f.Close(); err != nil { + return fmt.Errorf("browser: caching temporary file failed: %v", err) + } + return OpenFile(f.Name()) +} + +// OpenURL opens a new browser window pointing to url. +func OpenURL(url string) error { + return openBrowser(url) +} + +func runCmd(prog string, args ...string) error { + cmd := exec.Command(prog, args...) 
+ cmd.Stdout = Stdout + cmd.Stderr = Stderr + return cmd.Run() +} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go new file mode 100644 index 000000000000..8507cf7c2b45 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_darwin.go @@ -0,0 +1,5 @@ +package browser + +func openBrowser(url string) error { + return runCmd("open", url) +} diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go new file mode 100644 index 000000000000..4fc7ff0761b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_freebsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go new file mode 100644 index 000000000000..d26cdddf9c15 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_linux.go @@ -0,0 +1,21 @@ +package browser + +import ( + "os/exec" + "strings" +) + +func openBrowser(url string) error { + providers := []string{"xdg-open", "x-www-browser", "www-browser"} + + // There are multiple possible providers to open a browser on linux + // One of them is xdg-open, another is x-www-browser, then there's www-browser, etc. + // Look for one that exists and run it + for _, provider := range providers { + if _, err := exec.LookPath(provider); err == nil { + return runCmd(provider, url) + } + } + + return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound} +} diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go new file mode 100644 index 000000000000..65a5e5a29342 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_netbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go new file mode 100644 index 000000000000..4fc7ff0761b4 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_openbsd.go @@ -0,0 +1,14 @@ +package browser + +import ( + "errors" + "os/exec" +) + +func openBrowser(url string) error { + err := runCmd("xdg-open", url) + if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { + return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") + } + return err +} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go new file mode 100644 index 000000000000..7c5c17d34d26 --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd + +package browser + +import ( + "fmt" + "runtime" +) + +func openBrowser(url string) error { + return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) +} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go new file mode 100644 index 
000000000000..63e192959a5e --- /dev/null +++ b/vendor/github.com/pkg/browser/browser_windows.go @@ -0,0 +1,7 @@ +package browser + +import "golang.org/x/sys/windows" + +func openBrowser(url string) error { + return windows.ShellExecute(0, nil, windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL) +} diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 6c8e3e219797..e358db69c5d3 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.13.0 +PROMU_VERSION ?= 0.14.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.45.2 +GOLANGCI_LINT_VERSION ?= v1.49.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. - ifeq (,$(CIRCLE_JOB)) + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index ff6b927da159..06968ca2ed40 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, errors.New("invalid cpuinfo file: " + firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go new file mode 100644 index 000000000000..d88442f0edfd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index ea41bf2ca1e2..a6b2b3127cb1 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index d31a82600f67..f9d961e44179 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1da7..0c482c18ccfe 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -284,7 +284,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) diff --git 
a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index a94f86dc4ae6..06b7b8f21638 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,8 +27,9 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 // SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { @@ -38,6 +39,18 @@ type SoftnetStat struct { Dropped uint32 // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. + Width int } var softNetProcFile = "net/softnet_stat" @@ -66,22 +79,57 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) } - // We only parse the first three columns at the moment. 
- us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } + softnetStat.Width = width + stats = append(stats, softnetStat) } return stats, nil diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index dcea9c5a671f..5cc40aef55bf 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "io" "os" "path/filepath" "strconv" @@ -42,27 +43,43 @@ func (fs FS) NetStat() ([]NetStat, error) { return nil, err } - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), + procNetstat, err := parseNetstat(file) + if err != nil { + return nil, err + } + procNetstat.Filename = filepath.Base(filePath) + + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(r io.Reader) (NetStat, error) { + var ( + scanner = bufio.NewScanner(r) + netStat = NetStat{ + Stats: make(map[string][]uint64), } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) + ) + + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) 
+ + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index cca03327c3fe..ea83a75ffc42 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a // specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 000000000000..9df79c237999 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. 
+func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 48b5238194e8..6a43bb245951 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -33,139 +33,140 @@ type ProcNetstat struct { } type TcpExt struct { // nolint:revive - SyncookiesSent float64 - SyncookiesRecv float64 - SyncookiesFailed float64 - EmbryonicRsts float64 - PruneCalled float64 - RcvPruned float64 - OfoPruned float64 - OutOfWindowIcmps float64 - LockDroppedIcmps float64 - ArpFilter float64 - TW float64 - TWRecycled float64 - TWKilled float64 - PAWSActive float64 - PAWSEstab float64 - DelayedACKs float64 - DelayedACKLocked float64 - DelayedACKLost float64 - ListenOverflows float64 - ListenDrops float64 - TCPHPHits float64 - TCPPureAcks float64 - TCPHPAcks float64 - TCPRenoRecovery float64 - TCPSackRecovery float64 - TCPSACKReneging float64 - TCPSACKReorder float64 - TCPRenoReorder float64 - TCPTSReorder float64 - TCPFullUndo float64 - TCPPartialUndo float64 - TCPDSACKUndo float64 - TCPLossUndo float64 - TCPLostRetransmit float64 - TCPRenoFailures float64 - TCPSackFailures float64 - TCPLossFailures float64 - TCPFastRetrans float64 - TCPSlowStartRetrans float64 - TCPTimeouts float64 - TCPLossProbes float64 - TCPLossProbeRecovery float64 - TCPRenoRecoveryFail float64 - TCPSackRecoveryFail float64 - TCPRcvCollapsed float64 - TCPDSACKOldSent float64 - TCPDSACKOfoSent float64 - TCPDSACKRecv float64 - TCPDSACKOfoRecv float64 - TCPAbortOnData float64 - TCPAbortOnClose float64 - TCPAbortOnMemory float64 - TCPAbortOnTimeout float64 - TCPAbortOnLinger float64 - TCPAbortFailed float64 - TCPMemoryPressures float64 - TCPMemoryPressuresChrono float64 - TCPSACKDiscard float64 - TCPDSACKIgnoredOld float64 - TCPDSACKIgnoredNoUndo float64 - TCPSpuriousRTOs float64 - TCPMD5NotFound float64 - TCPMD5Unexpected float64 - TCPMD5Failure float64 - TCPSackShifted float64 - TCPSackMerged float64 - TCPSackShiftFallback float64 - TCPBacklogDrop float64 - PFMemallocDrop float64 - TCPMinTTLDrop float64 - TCPDeferAcceptDrop float64 - IPReversePathFilter float64 - TCPTimeWaitOverflow float64 - TCPReqQFullDoCookies float64 - TCPReqQFullDrop 
float64 - TCPRetransFail float64 - TCPRcvCoalesce float64 - TCPOFOQueue float64 - TCPOFODrop float64 - TCPOFOMerge float64 - TCPChallengeACK float64 - TCPSYNChallenge float64 - TCPFastOpenActive float64 - TCPFastOpenActiveFail float64 - TCPFastOpenPassive float64 - TCPFastOpenPassiveFail float64 - TCPFastOpenListenOverflow float64 - TCPFastOpenCookieReqd float64 - TCPFastOpenBlackhole float64 - TCPSpuriousRtxHostQueues float64 - BusyPollRxPackets float64 - TCPAutoCorking float64 - TCPFromZeroWindowAdv float64 - TCPToZeroWindowAdv float64 - TCPWantZeroWindowAdv float64 - TCPSynRetrans float64 - TCPOrigDataSent float64 - TCPHystartTrainDetect float64 - TCPHystartTrainCwnd float64 - TCPHystartDelayDetect float64 - TCPHystartDelayCwnd float64 - TCPACKSkippedSynRecv float64 - TCPACKSkippedPAWS float64 - TCPACKSkippedSeq float64 - TCPACKSkippedFinWait2 float64 - TCPACKSkippedTimeWait float64 - TCPACKSkippedChallenge float64 - TCPWinProbe float64 - TCPKeepAlive float64 - TCPMTUPFail float64 - TCPMTUPSuccess float64 - TCPWqueueTooBig float64 + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + 
TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 } type IpExt struct { // nolint:revive - InNoRoutes float64 - InTruncatedPkts float64 - InMcastPkts float64 - OutMcastPkts float64 - InBcastPkts float64 - OutBcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InCsumErrors float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 - ReasmOverlaps float64 + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 + ReasmOverlaps *float64 } func (p Proc) Netstat() (ProcNetstat, error) { @@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) { if err != nil { return ProcNetstat{PID: p.PID}, err } - procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) procNetstat.PID = p.PID return procNetstat, err } -// parseNetstat parses the metrics from proc//net/netstat file +// parseProcNetstat parses the metrics from proc//net/netstat file // and returns a ProcNetstat structure. 
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { var ( scanner = bufio.NewScanner(r) procNetstat = ProcNetstat{} @@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { case "TcpExt": switch key { case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = value + procNetstat.TcpExt.SyncookiesSent = &value case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = value + procNetstat.TcpExt.SyncookiesRecv = &value case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = value + procNetstat.TcpExt.SyncookiesFailed = &value case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = value + procNetstat.TcpExt.EmbryonicRsts = &value case "PruneCalled": - procNetstat.TcpExt.PruneCalled = value + procNetstat.TcpExt.PruneCalled = &value case "RcvPruned": - procNetstat.TcpExt.RcvPruned = value + procNetstat.TcpExt.RcvPruned = &value case "OfoPruned": - procNetstat.TcpExt.OfoPruned = value + procNetstat.TcpExt.OfoPruned = &value case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = value + procNetstat.TcpExt.OutOfWindowIcmps = &value case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = value + procNetstat.TcpExt.LockDroppedIcmps = &value case "ArpFilter": - procNetstat.TcpExt.ArpFilter = value + procNetstat.TcpExt.ArpFilter = &value case "TW": - procNetstat.TcpExt.TW = value + procNetstat.TcpExt.TW = &value case "TWRecycled": - procNetstat.TcpExt.TWRecycled = value + procNetstat.TcpExt.TWRecycled = &value case "TWKilled": - procNetstat.TcpExt.TWKilled = value + procNetstat.TcpExt.TWKilled = &value case "PAWSActive": - procNetstat.TcpExt.PAWSActive = value + procNetstat.TcpExt.PAWSActive = &value case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = value + procNetstat.TcpExt.PAWSEstab = &value case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = value + procNetstat.TcpExt.DelayedACKs = &value case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = value + procNetstat.TcpExt.DelayedACKLocked = &value case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = value + procNetstat.TcpExt.DelayedACKLost = &value case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = value + procNetstat.TcpExt.ListenOverflows = &value case "ListenDrops": - procNetstat.TcpExt.ListenDrops = value + procNetstat.TcpExt.ListenDrops = &value case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = value + procNetstat.TcpExt.TCPHPHits = &value case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = value + procNetstat.TcpExt.TCPPureAcks = &value case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = value + procNetstat.TcpExt.TCPHPAcks = &value case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = value + procNetstat.TcpExt.TCPRenoRecovery = &value case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = value + procNetstat.TcpExt.TCPSackRecovery = &value case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = value + procNetstat.TcpExt.TCPSACKReneging = &value case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = value + procNetstat.TcpExt.TCPSACKReorder = &value case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = value + procNetstat.TcpExt.TCPRenoReorder = &value case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = value + procNetstat.TcpExt.TCPTSReorder = &value case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = value + procNetstat.TcpExt.TCPFullUndo = &value case "TCPPartialUndo": - 
procNetstat.TcpExt.TCPPartialUndo = value + procNetstat.TcpExt.TCPPartialUndo = &value case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = value + procNetstat.TcpExt.TCPDSACKUndo = &value case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = value + procNetstat.TcpExt.TCPLossUndo = &value case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = value + procNetstat.TcpExt.TCPLostRetransmit = &value case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = value + procNetstat.TcpExt.TCPRenoFailures = &value case "TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = value + procNetstat.TcpExt.TCPSackFailures = &value case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = value + procNetstat.TcpExt.TCPLossFailures = &value case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = value + procNetstat.TcpExt.TCPFastRetrans = &value case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = value + procNetstat.TcpExt.TCPSlowStartRetrans = &value case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = value + procNetstat.TcpExt.TCPTimeouts = &value case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = value + procNetstat.TcpExt.TCPLossProbes = &value case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = value + procNetstat.TcpExt.TCPLossProbeRecovery = &value case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = value + procNetstat.TcpExt.TCPRenoRecoveryFail = &value case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = value + procNetstat.TcpExt.TCPSackRecoveryFail = &value case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = value + procNetstat.TcpExt.TCPRcvCollapsed = &value case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = value + procNetstat.TcpExt.TCPDSACKOldSent = &value case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = value + procNetstat.TcpExt.TCPDSACKOfoSent = &value case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = value + procNetstat.TcpExt.TCPDSACKRecv = &value case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = value + procNetstat.TcpExt.TCPDSACKOfoRecv = &value case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = value + procNetstat.TcpExt.TCPAbortOnData = &value case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = value + procNetstat.TcpExt.TCPAbortOnClose = &value case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = value + procNetstat.TcpExt.TCPDeferAcceptDrop = &value case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = value + procNetstat.TcpExt.IPReversePathFilter = &value case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = value + procNetstat.TcpExt.TCPTimeWaitOverflow = &value case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = value + procNetstat.TcpExt.TCPReqQFullDoCookies = &value case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = value + procNetstat.TcpExt.TCPReqQFullDrop = &value case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = value + procNetstat.TcpExt.TCPRetransFail = &value case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = value + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = value + procNetstat.TcpExt.TCPOFOQueue = &value case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = value + procNetstat.TcpExt.TCPOFODrop = &value case "TCPOFOMerge": - 
procNetstat.TcpExt.TCPOFOMerge = value + procNetstat.TcpExt.TCPOFOMerge = &value case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = value + procNetstat.TcpExt.TCPChallengeACK = &value case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = value + procNetstat.TcpExt.TCPSYNChallenge = &value case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = value + procNetstat.TcpExt.TCPFastOpenActive = &value case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = value + procNetstat.TcpExt.TCPFastOpenActiveFail = &value case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = value + procNetstat.TcpExt.TCPFastOpenPassive = &value case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = value + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = value + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = value + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = value + procNetstat.TcpExt.TCPFastOpenBlackhole = &value case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = value + procNetstat.TcpExt.BusyPollRxPackets = &value case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = value + procNetstat.TcpExt.TCPAutoCorking = &value case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = value + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = value + procNetstat.TcpExt.TCPToZeroWindowAdv = &value case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = value + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = value + procNetstat.TcpExt.TCPSynRetrans = &value case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = value + procNetstat.TcpExt.TCPOrigDataSent = &value case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = value + procNetstat.TcpExt.TCPHystartTrainDetect = &value case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = value + procNetstat.TcpExt.TCPHystartTrainCwnd = &value case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = value + procNetstat.TcpExt.TCPHystartDelayDetect = &value case "TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = value + procNetstat.TcpExt.TCPHystartDelayCwnd = &value case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = value + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = value + procNetstat.TcpExt.TCPACKSkippedPAWS = &value case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = value + procNetstat.TcpExt.TCPACKSkippedSeq = &value case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = value + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = value + procNetstat.TcpExt.TCPACKSkippedChallenge = &value case "TCPWinProbe": - 
procNetstat.TcpExt.TCPWinProbe = value + procNetstat.TcpExt.TCPWinProbe = &value case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = value + procNetstat.TcpExt.TCPKeepAlive = &value case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = value + procNetstat.TcpExt.TCPMTUPFail = &value case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = value + procNetstat.TcpExt.TCPMTUPSuccess = &value case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = value + procNetstat.TcpExt.TCPWqueueTooBig = &value } case "IpExt": switch key { case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = value + procNetstat.IpExt.InNoRoutes = &value case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = value + procNetstat.IpExt.InTruncatedPkts = &value case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = value + procNetstat.IpExt.InMcastPkts = &value case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = value + procNetstat.IpExt.OutMcastPkts = &value case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = value + procNetstat.IpExt.InBcastPkts = &value case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = value + procNetstat.IpExt.OutBcastPkts = &value case "InOctets": - procNetstat.IpExt.InOctets = value + procNetstat.IpExt.InOctets = &value case "OutOctets": - procNetstat.IpExt.OutOctets = value + procNetstat.IpExt.OutOctets = &value case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = value + procNetstat.IpExt.InMcastOctets = &value case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = value + procNetstat.IpExt.OutMcastOctets = &value case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = value + procNetstat.IpExt.InBcastOctets = &value case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = value + procNetstat.IpExt.OutBcastOctets = &value case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = value + procNetstat.IpExt.InCsumErrors = &value case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = value + procNetstat.IpExt.InNoECTPkts = &value case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = value + procNetstat.IpExt.InECT1Pkts = &value case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = value + procNetstat.IpExt.InECT0Pkts = &value case "InCEPkts": - procNetstat.IpExt.InCEPkts = value + procNetstat.IpExt.InCEPkts = &value case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = value + procNetstat.IpExt.ReasmOverlaps = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index ae191896cbd7..6c46b718849c 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -37,100 +37,100 @@ type ProcSnmp struct { } type Ip struct { // nolint:revive - Forwarding float64 - DefaultTTL float64 - InReceives float64 - InHdrErrors float64 - InAddrErrors float64 - ForwDatagrams float64 - InUnknownProtos float64 - InDiscards float64 - InDelivers float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 
+ FragCreates *float64 } -type Icmp struct { - InMsgs float64 - InErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InTimeExcds float64 - InParmProbs float64 - InSrcQuenchs float64 - InRedirects float64 - InEchos float64 - InEchoReps float64 - InTimestamps float64 - InTimestampReps float64 - InAddrMasks float64 - InAddrMaskReps float64 - OutMsgs float64 - OutErrors float64 - OutDestUnreachs float64 - OutTimeExcds float64 - OutParmProbs float64 - OutSrcQuenchs float64 - OutRedirects float64 - OutEchos float64 - OutEchoReps float64 - OutTimestamps float64 - OutTimestampReps float64 - OutAddrMasks float64 - OutAddrMaskReps float64 +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 } type IcmpMsg struct { - InType3 float64 - OutType3 float64 + InType3 *float64 + OutType3 *float64 } type Tcp struct { // nolint:revive - RtoAlgorithm float64 - RtoMin float64 - RtoMax float64 - MaxConn float64 - ActiveOpens float64 - PassiveOpens float64 - AttemptFails float64 - EstabResets float64 - CurrEstab float64 - InSegs float64 - OutSegs float64 - RetransSegs float64 - InErrs float64 - OutRsts float64 - InCsumErrors float64 + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 } type Udp struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } func (p Proc) Snmp() (ProcSnmp, error) { @@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { case "Ip": switch key { case "Forwarding": - procSnmp.Ip.Forwarding = value + procSnmp.Ip.Forwarding = &value case "DefaultTTL": - procSnmp.Ip.DefaultTTL = value + procSnmp.Ip.DefaultTTL = &value case "InReceives": - procSnmp.Ip.InReceives = value + procSnmp.Ip.InReceives = &value case "InHdrErrors": - procSnmp.Ip.InHdrErrors = value + procSnmp.Ip.InHdrErrors = &value case "InAddrErrors": - procSnmp.Ip.InAddrErrors = value + procSnmp.Ip.InAddrErrors = &value case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = value + procSnmp.Ip.ForwDatagrams = &value 
case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = value + procSnmp.Ip.InUnknownProtos = &value case "InDiscards": - procSnmp.Ip.InDiscards = value + procSnmp.Ip.InDiscards = &value case "InDelivers": - procSnmp.Ip.InDelivers = value + procSnmp.Ip.InDelivers = &value case "OutRequests": - procSnmp.Ip.OutRequests = value + procSnmp.Ip.OutRequests = &value case "OutDiscards": - procSnmp.Ip.OutDiscards = value + procSnmp.Ip.OutDiscards = &value case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = value + procSnmp.Ip.OutNoRoutes = &value case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = value + procSnmp.Ip.ReasmTimeout = &value case "ReasmReqds": - procSnmp.Ip.ReasmReqds = value + procSnmp.Ip.ReasmReqds = &value case "ReasmOKs": - procSnmp.Ip.ReasmOKs = value + procSnmp.Ip.ReasmOKs = &value case "ReasmFails": - procSnmp.Ip.ReasmFails = value + procSnmp.Ip.ReasmFails = &value case "FragOKs": - procSnmp.Ip.FragOKs = value + procSnmp.Ip.FragOKs = &value case "FragFails": - procSnmp.Ip.FragFails = value + procSnmp.Ip.FragFails = &value case "FragCreates": - procSnmp.Ip.FragCreates = value + procSnmp.Ip.FragCreates = &value } case "Icmp": switch key { case "InMsgs": - procSnmp.Icmp.InMsgs = value + procSnmp.Icmp.InMsgs = &value case "InErrors": - procSnmp.Icmp.InErrors = value + procSnmp.Icmp.InErrors = &value case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = value + procSnmp.Icmp.InCsumErrors = &value case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = value + procSnmp.Icmp.InDestUnreachs = &value case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = value + procSnmp.Icmp.InTimeExcds = &value case "InParmProbs": - procSnmp.Icmp.InParmProbs = value + procSnmp.Icmp.InParmProbs = &value case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = value + procSnmp.Icmp.InSrcQuenchs = &value case "InRedirects": - procSnmp.Icmp.InRedirects = value + procSnmp.Icmp.InRedirects = &value case "InEchos": - procSnmp.Icmp.InEchos = value + procSnmp.Icmp.InEchos = &value case "InEchoReps": - procSnmp.Icmp.InEchoReps = value + procSnmp.Icmp.InEchoReps = &value case "InTimestamps": - procSnmp.Icmp.InTimestamps = value + procSnmp.Icmp.InTimestamps = &value case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = value + procSnmp.Icmp.InTimestampReps = &value case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = value + procSnmp.Icmp.InAddrMasks = &value case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = value + procSnmp.Icmp.InAddrMaskReps = &value case "OutMsgs": - procSnmp.Icmp.OutMsgs = value + procSnmp.Icmp.OutMsgs = &value case "OutErrors": - procSnmp.Icmp.OutErrors = value + procSnmp.Icmp.OutErrors = &value case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = value + procSnmp.Icmp.OutDestUnreachs = &value case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = value + procSnmp.Icmp.OutTimeExcds = &value case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = value + procSnmp.Icmp.OutParmProbs = &value case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = value + procSnmp.Icmp.OutSrcQuenchs = &value case "OutRedirects": - procSnmp.Icmp.OutRedirects = value + procSnmp.Icmp.OutRedirects = &value case "OutEchos": - procSnmp.Icmp.OutEchos = value + procSnmp.Icmp.OutEchos = &value case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = value + procSnmp.Icmp.OutEchoReps = &value case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = value + procSnmp.Icmp.OutTimestamps = &value case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = value + procSnmp.Icmp.OutTimestampReps = &value case "OutAddrMasks": - 
procSnmp.Icmp.OutAddrMasks = value + procSnmp.Icmp.OutAddrMasks = &value case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = value + procSnmp.Icmp.OutAddrMaskReps = &value } case "IcmpMsg": switch key { case "InType3": - procSnmp.IcmpMsg.InType3 = value + procSnmp.IcmpMsg.InType3 = &value case "OutType3": - procSnmp.IcmpMsg.OutType3 = value + procSnmp.IcmpMsg.OutType3 = &value } case "Tcp": switch key { case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = value + procSnmp.Tcp.RtoAlgorithm = &value case "RtoMin": - procSnmp.Tcp.RtoMin = value + procSnmp.Tcp.RtoMin = &value case "RtoMax": - procSnmp.Tcp.RtoMax = value + procSnmp.Tcp.RtoMax = &value case "MaxConn": - procSnmp.Tcp.MaxConn = value + procSnmp.Tcp.MaxConn = &value case "ActiveOpens": - procSnmp.Tcp.ActiveOpens = value + procSnmp.Tcp.ActiveOpens = &value case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = value + procSnmp.Tcp.PassiveOpens = &value case "AttemptFails": - procSnmp.Tcp.AttemptFails = value + procSnmp.Tcp.AttemptFails = &value case "EstabResets": - procSnmp.Tcp.EstabResets = value + procSnmp.Tcp.EstabResets = &value case "CurrEstab": - procSnmp.Tcp.CurrEstab = value + procSnmp.Tcp.CurrEstab = &value case "InSegs": - procSnmp.Tcp.InSegs = value + procSnmp.Tcp.InSegs = &value case "OutSegs": - procSnmp.Tcp.OutSegs = value + procSnmp.Tcp.OutSegs = &value case "RetransSegs": - procSnmp.Tcp.RetransSegs = value + procSnmp.Tcp.RetransSegs = &value case "InErrs": - procSnmp.Tcp.InErrs = value + procSnmp.Tcp.InErrs = &value case "OutRsts": - procSnmp.Tcp.OutRsts = value + procSnmp.Tcp.OutRsts = &value case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = value + procSnmp.Tcp.InCsumErrors = &value } case "Udp": switch key { case "InDatagrams": - procSnmp.Udp.InDatagrams = value + procSnmp.Udp.InDatagrams = &value case "NoPorts": - procSnmp.Udp.NoPorts = value + procSnmp.Udp.NoPorts = &value case "InErrors": - procSnmp.Udp.InErrors = value + procSnmp.Udp.InErrors = &value case "OutDatagrams": - procSnmp.Udp.OutDatagrams = value + procSnmp.Udp.OutDatagrams = &value case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = value + procSnmp.Udp.RcvbufErrors = &value case "SndbufErrors": - procSnmp.Udp.SndbufErrors = value + procSnmp.Udp.SndbufErrors = &value case "InCsumErrors": - procSnmp.Udp.InCsumErrors = value + procSnmp.Udp.InCsumErrors = &value case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = value + procSnmp.Udp.IgnoredMulti = &value } case "UdpLite": switch key { case "InDatagrams": - procSnmp.UdpLite.InDatagrams = value + procSnmp.UdpLite.InDatagrams = &value case "NoPorts": - procSnmp.UdpLite.NoPorts = value + procSnmp.UdpLite.NoPorts = &value case "InErrors": - procSnmp.UdpLite.InErrors = value + procSnmp.UdpLite.InErrors = &value case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = value + procSnmp.UdpLite.OutDatagrams = &value case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = value + procSnmp.UdpLite.RcvbufErrors = &value case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = value + procSnmp.UdpLite.SndbufErrors = &value case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = value + procSnmp.UdpLite.InCsumErrors = &value case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = value + procSnmp.UdpLite.IgnoredMulti = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index f611992d52ca..3059cc6a1367 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -36,106 +36,106 @@ type 
ProcSnmp6 struct { } type Ip6 struct { // nolint:revive - InReceives float64 - InHdrErrors float64 - InTooBigErrors float64 - InNoRoutes float64 - InAddrErrors float64 - InUnknownProtos float64 - InTruncatedPkts float64 - InDiscards float64 - InDelivers float64 - OutForwDatagrams float64 - OutRequests float64 - OutDiscards float64 - OutNoRoutes float64 - ReasmTimeout float64 - ReasmReqds float64 - ReasmOKs float64 - ReasmFails float64 - FragOKs float64 - FragFails float64 - FragCreates float64 - InMcastPkts float64 - OutMcastPkts float64 - InOctets float64 - OutOctets float64 - InMcastOctets float64 - OutMcastOctets float64 - InBcastOctets float64 - OutBcastOctets float64 - InNoECTPkts float64 - InECT1Pkts float64 - InECT0Pkts float64 - InCEPkts float64 + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 } type Icmp6 struct { - InMsgs float64 - InErrors float64 - OutMsgs float64 - OutErrors float64 - InCsumErrors float64 - InDestUnreachs float64 - InPktTooBigs float64 - InTimeExcds float64 - InParmProblems float64 - InEchos float64 - InEchoReplies float64 - InGroupMembQueries float64 - InGroupMembResponses float64 - InGroupMembReductions float64 - InRouterSolicits float64 - InRouterAdvertisements float64 - InNeighborSolicits float64 - InNeighborAdvertisements float64 - InRedirects float64 - InMLDv2Reports float64 - OutDestUnreachs float64 - OutPktTooBigs float64 - OutTimeExcds float64 - OutParmProblems float64 - OutEchos float64 - OutEchoReplies float64 - OutGroupMembQueries float64 - OutGroupMembResponses float64 - OutGroupMembReductions float64 - OutRouterSolicits float64 - OutRouterAdvertisements float64 - OutNeighborSolicits float64 - OutNeighborAdvertisements float64 - OutRedirects float64 - OutMLDv2Reports float64 - InType1 float64 - InType134 float64 - InType135 float64 - InType136 float64 - InType143 float64 - OutType133 float64 - OutType135 float64 - OutType136 float64 - OutType143 float64 + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + 
OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 } type Udp6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 - IgnoredMulti float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 } type UdpLite6 struct { // nolint:revive - InDatagrams float64 - NoPorts float64 - InErrors float64 - OutDatagrams float64 - RcvbufErrors float64 - SndbufErrors float64 - InCsumErrors float64 + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 } func (p Proc) Snmp6() (ProcSnmp6, error) { @@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { case "Ip6": switch key { case "InReceives": - procSnmp6.Ip6.InReceives = value + procSnmp6.Ip6.InReceives = &value case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = value + procSnmp6.Ip6.InHdrErrors = &value case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = value + procSnmp6.Ip6.InTooBigErrors = &value case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = value + procSnmp6.Ip6.InNoRoutes = &value case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = value + procSnmp6.Ip6.InAddrErrors = &value case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = value + procSnmp6.Ip6.InUnknownProtos = &value case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = value + procSnmp6.Ip6.InTruncatedPkts = &value case "InDiscards": - procSnmp6.Ip6.InDiscards = value + procSnmp6.Ip6.InDiscards = &value case "InDelivers": - procSnmp6.Ip6.InDelivers = value + procSnmp6.Ip6.InDelivers = &value case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = value + procSnmp6.Ip6.OutForwDatagrams = &value case "OutRequests": - procSnmp6.Ip6.OutRequests = value + procSnmp6.Ip6.OutRequests = &value case "OutDiscards": - procSnmp6.Ip6.OutDiscards = value + procSnmp6.Ip6.OutDiscards = &value case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = value + procSnmp6.Ip6.OutNoRoutes = &value case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = value + procSnmp6.Ip6.ReasmTimeout = &value case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = value + procSnmp6.Ip6.ReasmReqds = &value case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = value + procSnmp6.Ip6.ReasmOKs = &value case "ReasmFails": - procSnmp6.Ip6.ReasmFails = value + procSnmp6.Ip6.ReasmFails = &value case "FragOKs": - procSnmp6.Ip6.FragOKs = value + procSnmp6.Ip6.FragOKs = &value case "FragFails": - procSnmp6.Ip6.FragFails = value + procSnmp6.Ip6.FragFails = &value case "FragCreates": - procSnmp6.Ip6.FragCreates = value + procSnmp6.Ip6.FragCreates = &value case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = value + procSnmp6.Ip6.InMcastPkts = &value case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = value + procSnmp6.Ip6.OutMcastPkts = &value case "InOctets": - procSnmp6.Ip6.InOctets = value + procSnmp6.Ip6.InOctets = &value case "OutOctets": - procSnmp6.Ip6.OutOctets = value + procSnmp6.Ip6.OutOctets = &value case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = value + procSnmp6.Ip6.InMcastOctets = &value case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = value + procSnmp6.Ip6.OutMcastOctets = &value case 
"InBcastOctets": - procSnmp6.Ip6.InBcastOctets = value + procSnmp6.Ip6.InBcastOctets = &value case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = value + procSnmp6.Ip6.OutBcastOctets = &value case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = value + procSnmp6.Ip6.InNoECTPkts = &value case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = value + procSnmp6.Ip6.InECT1Pkts = &value case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = value + procSnmp6.Ip6.InECT0Pkts = &value case "InCEPkts": - procSnmp6.Ip6.InCEPkts = value + procSnmp6.Ip6.InCEPkts = &value } case "Icmp6": switch key { case "InMsgs": - procSnmp6.Icmp6.InMsgs = value + procSnmp6.Icmp6.InMsgs = &value case "InErrors": - procSnmp6.Icmp6.InErrors = value + procSnmp6.Icmp6.InErrors = &value case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = value + procSnmp6.Icmp6.OutMsgs = &value case "OutErrors": - procSnmp6.Icmp6.OutErrors = value + procSnmp6.Icmp6.OutErrors = &value case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = value + procSnmp6.Icmp6.InCsumErrors = &value case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = value + procSnmp6.Icmp6.InDestUnreachs = &value case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = value + procSnmp6.Icmp6.InPktTooBigs = &value case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = value + procSnmp6.Icmp6.InTimeExcds = &value case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = value + procSnmp6.Icmp6.InParmProblems = &value case "InEchos": - procSnmp6.Icmp6.InEchos = value + procSnmp6.Icmp6.InEchos = &value case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = value + procSnmp6.Icmp6.InEchoReplies = &value case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = value + procSnmp6.Icmp6.InGroupMembQueries = &value case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = value + procSnmp6.Icmp6.InGroupMembResponses = &value case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = value + procSnmp6.Icmp6.InGroupMembReductions = &value case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = value + procSnmp6.Icmp6.InRouterSolicits = &value case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = value + procSnmp6.Icmp6.InRouterAdvertisements = &value case "InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = value + procSnmp6.Icmp6.InNeighborSolicits = &value case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = value + procSnmp6.Icmp6.InNeighborAdvertisements = &value case "InRedirects": - procSnmp6.Icmp6.InRedirects = value + procSnmp6.Icmp6.InRedirects = &value case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = value + procSnmp6.Icmp6.InMLDv2Reports = &value case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = value + procSnmp6.Icmp6.OutDestUnreachs = &value case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = value + procSnmp6.Icmp6.OutPktTooBigs = &value case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = value + procSnmp6.Icmp6.OutTimeExcds = &value case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = value + procSnmp6.Icmp6.OutParmProblems = &value case "OutEchos": - procSnmp6.Icmp6.OutEchos = value + procSnmp6.Icmp6.OutEchos = &value case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = value + procSnmp6.Icmp6.OutEchoReplies = &value case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = value + procSnmp6.Icmp6.OutGroupMembQueries = &value case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = value + procSnmp6.Icmp6.OutGroupMembResponses = 
&value case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = value + procSnmp6.Icmp6.OutGroupMembReductions = &value case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = value + procSnmp6.Icmp6.OutRouterSolicits = &value case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = value + procSnmp6.Icmp6.OutRouterAdvertisements = &value case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = value + procSnmp6.Icmp6.OutNeighborSolicits = &value case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = value + procSnmp6.Icmp6.OutNeighborAdvertisements = &value case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = value + procSnmp6.Icmp6.OutRedirects = &value case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = value + procSnmp6.Icmp6.OutMLDv2Reports = &value case "InType1": - procSnmp6.Icmp6.InType1 = value + procSnmp6.Icmp6.InType1 = &value case "InType134": - procSnmp6.Icmp6.InType134 = value + procSnmp6.Icmp6.InType134 = &value case "InType135": - procSnmp6.Icmp6.InType135 = value + procSnmp6.Icmp6.InType135 = &value case "InType136": - procSnmp6.Icmp6.InType136 = value + procSnmp6.Icmp6.InType136 = &value case "InType143": - procSnmp6.Icmp6.InType143 = value + procSnmp6.Icmp6.InType143 = &value case "OutType133": - procSnmp6.Icmp6.OutType133 = value + procSnmp6.Icmp6.OutType133 = &value case "OutType135": - procSnmp6.Icmp6.OutType135 = value + procSnmp6.Icmp6.OutType135 = &value case "OutType136": - procSnmp6.Icmp6.OutType136 = value + procSnmp6.Icmp6.OutType136 = &value case "OutType143": - procSnmp6.Icmp6.OutType143 = value + procSnmp6.Icmp6.OutType143 = &value } case "Udp6": switch key { case "InDatagrams": - procSnmp6.Udp6.InDatagrams = value + procSnmp6.Udp6.InDatagrams = &value case "NoPorts": - procSnmp6.Udp6.NoPorts = value + procSnmp6.Udp6.NoPorts = &value case "InErrors": - procSnmp6.Udp6.InErrors = value + procSnmp6.Udp6.InErrors = &value case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = value + procSnmp6.Udp6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = value + procSnmp6.Udp6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = value + procSnmp6.Udp6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = value + procSnmp6.Udp6.InCsumErrors = &value case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = value + procSnmp6.Udp6.IgnoredMulti = &value } case "UdpLite6": switch key { case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = value + procSnmp6.UdpLite6.InDatagrams = &value case "NoPorts": - procSnmp6.UdpLite6.NoPorts = value + procSnmp6.UdpLite6.NoPorts = &value case "InErrors": - procSnmp6.UdpLite6.InErrors = value + procSnmp6.UdpLite6.InErrors = &value case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = value + procSnmp6.UdpLite6.OutDatagrams = &value case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = value + procSnmp6.UdpLite6.RcvbufErrors = &value case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = value + procSnmp6.UdpLite6.SndbufErrors = &value case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = value + procSnmp6.UdpLite6.InCsumErrors = &value } } } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 06c556ef9623..b278eb2c2df7 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -102,6 +102,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the 
rss of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -184,7 +186,7 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 594022ded48a..3d8c06439a93 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -96,10 +96,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 33f97caa08da..586af48af9f6 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) { if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} - stat := Stat{} +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) { if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 000000000000..f08bfc769db1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,79 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. +func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := fsi.FS(proc.path("task")) + if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 20ceb77e2df7..cdedcae996d8 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -26,7 +26,9 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array // and numa_zonelist_order (deprecated) which is a string. diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 8e8460d4c59b..8bc4bf34a09d 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -80,7 +80,8 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return cfg, nil } - for i, v := range cfg.GlobalConfig.ExternalLabels { + b := labels.ScratchBuilder{} + cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { if s == "$" { return "$" @@ -93,10 +94,10 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro }) if newV != v.Value { level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) - v.Value = newV - cfg.GlobalConfig.ExternalLabels[i] = v } - } + b.Add(v.Name, newV) + }) + cfg.GlobalConfig.ExternalLabels = b.Labels() return cfg, nil } @@ -112,10 +113,6 @@ func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log. 
} if agentMode { - if len(cfg.RemoteWriteConfigs) == 0 { - return nil, errors.New("at least one remote_write target must be specified in agent mode") - } - if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { return nil, errors.New("field alerting is not allowed in agent mode") } @@ -361,13 +358,16 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } - for _, l := range gc.ExternalLabels { + if err := gc.ExternalLabels.Validate(func(l labels.Label) error { if !model.LabelName(l.Name).IsValid() { return fmt.Errorf("%q is not a valid label name", l.Name) } if !model.LabelValue(l.Value).IsValid() { return fmt.Errorf("%q is not a valid label value", l.Value) } + return nil + }); err != nil { + return err } // First set the correct scrape interval, then check that the timeout @@ -394,7 +394,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // isZero returns true iff the global config is the zero value. func (c *GlobalConfig) isZero() bool { - return c.ExternalLabels == nil && + return c.ExternalLabels.IsEmpty() && c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && diff --git a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go index 44576c79b975..098fbb4c5fa8 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go +++ b/vendor/github.com/prometheus/prometheus/discovery/azure/azure.go @@ -55,6 +55,7 @@ const ( azureLabelMachinePublicIP = azureLabel + "machine_public_ip" azureLabelMachineTag = azureLabel + "machine_tag_" azureLabelMachineScaleSet = azureLabel + "machine_scale_set" + azureLabelMachineSize = azureLabel + "machine_size" authMethodOAuth = "OAuth" authMethodManagedIdentity = "ManagedIdentity" @@ -261,6 +262,7 @@ type virtualMachine struct { ScaleSet string Tags map[string]*string NetworkInterfaces []string + Size string } // Create a new azureResource object from an ID string. 
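The config.Load and GlobalConfig hunks above move to the opaque labels.Labels API: entries are visited with Range or Validate instead of being indexed, and a modified set is rebuilt through a ScratchBuilder. A minimal sketch of that pattern follows; the helper name, the label names, and the environment variable are placeholders, and the import path is assumed to match the vendored prometheus/prometheus module.

```go
// Illustrative sketch only: expand ${VAR} references in label values using
// the opaque labels.Labels API (Range + ScratchBuilder), mirroring the
// pattern used in config.Load above. Names and values are placeholders.
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/prometheus/model/labels"
)

func expandLabelValues(in labels.Labels) labels.Labels {
	b := labels.ScratchBuilder{}
	in.Range(func(l labels.Label) {
		// os.Expand substitutes $VAR / ${VAR} references in the value.
		b.Add(l.Name, os.Expand(l.Value, os.Getenv))
	})
	// Input was iterated in order, so the rebuilt set stays sorted.
	return b.Labels()
}

func main() {
	os.Setenv("REGION", "eu-west-1")
	ext := labels.FromStrings("cluster", "demo", "region", "${REGION}")
	fmt.Println(expandLabelValues(ext))
}
```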
@@ -343,6 +345,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { azureLabelMachineOSType: model.LabelValue(vm.OsType), azureLabelMachineLocation: model.LabelValue(vm.Location), azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), + azureLabelMachineSize: model.LabelValue(vm.Size), } if vm.ScaleSet != "" { @@ -514,6 +517,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { tags := map[string]*string{} networkInterfaces := []string{} var computerName string + var size string if vm.Tags != nil { tags = vm.Tags @@ -525,10 +529,13 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { } } - if vm.VirtualMachineProperties != nil && - vm.VirtualMachineProperties.OsProfile != nil && - vm.VirtualMachineProperties.OsProfile.ComputerName != nil { - computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName) + if vm.VirtualMachineProperties != nil { + if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil { + computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName) + } + if vm.VirtualMachineProperties.HardwareProfile != nil { + size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize) + } } return virtualMachine{ @@ -541,6 +548,7 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { ScaleSet: "", Tags: tags, NetworkInterfaces: networkInterfaces, + Size: size, } } @@ -549,6 +557,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin tags := map[string]*string{} networkInterfaces := []string{} var computerName string + var size string if vm.Tags != nil { tags = vm.Tags @@ -560,8 +569,13 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin } } - if vm.VirtualMachineScaleSetVMProperties != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile != nil { - computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName) + if vm.VirtualMachineScaleSetVMProperties != nil { + if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil { + computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName) + } + if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil { + size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize) + } } return virtualMachine{ @@ -574,6 +588,7 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin ScaleSet: scaleSetName, Tags: tags, NetworkInterfaces: networkInterfaces, + Size: size, } } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go index 0d9c5a25b14e..135735154c03 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go @@ -259,6 +259,8 @@ const ( endpointSlicePortLabel = metaLabelPrefix + "endpointslice_port" endpointSlicePortAppProtocol = metaLabelPrefix + "endpointslice_port_app_protocol" endpointSliceEndpointConditionsReadyLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_ready" + endpointSliceEndpointConditionsServingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_serving" + endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating" endpointSliceEndpointHostnameLabel = metaLabelPrefix + 
"endpointslice_endpoint_hostname" endpointSliceAddressTargetKindLabel = metaLabelPrefix + "endpointslice_address_target_kind" endpointSliceAddressTargetNameLabel = metaLabelPrefix + "endpointslice_address_target_name" @@ -313,6 +315,14 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) } + if ep.conditions().serving() != nil { + target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + } + + if ep.conditions().terminating() != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + } + if ep.hostname() != nil { target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go index 87484b06fdeb..5a21f1b899a2 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go @@ -49,6 +49,8 @@ type endpointSliceEndpointAdaptor interface { type endpointSliceEndpointConditionsAdaptor interface { ready() *bool + serving() *bool + terminating() *bool } // Adaptor for k8s.io/api/discovery/v1 @@ -193,6 +195,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSliceEndpointAdaptorV1beta1 struct { endpoint v1beta1.Endpoint } @@ -237,6 +247,14 @@ func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { return e.endpointConditions.Ready } +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { + return e.endpointConditions.Serving +} + +func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { + return e.endpointConditions.Terminating +} + type endpointSlicePortAdaptorV1 struct { endpointPort v1.EndpointPort } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index 0ffedc51ed04..0f03e2cdb722 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -382,7 +382,8 @@ func mapSelector(rawSelector []SelectorConfig) roleSelector { return rs } -const resyncPeriod = 10 * time.Minute +// Disable the informer's resync, which just periodically resends already processed updates and distort SD metrics. +const resyncDisabled = 0 // Run implements the discoverer interface. 
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { @@ -475,8 +476,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpointSlice( log.With(d.logger, "role", "endpointslice"), informer, - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, ) d.discoverers = append(d.discoverers, eps) @@ -534,8 +535,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { eps := NewEndpoints( log.With(d.logger, "role", "endpoint"), d.newEndpointsByNodeInformer(elw), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), - cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), + cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), nodeInf, ) d.discoverers = append(d.discoverers, eps) @@ -589,7 +590,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } svc := NewService( log.With(d.logger, "role", "service"), - cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod), + cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), ) d.discoverers = append(d.discoverers, svc) go svc.informer.Run(ctx.Done()) @@ -627,7 +628,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { return i.Watch(ctx, options) }, } - informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncPeriod) + informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) } else { i := d.client.NetworkingV1beta1().Ingresses(namespace) ilw := &cache.ListWatch{ @@ -642,7 +643,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { return i.Watch(ctx, options) }, } - informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncPeriod) + informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) } ingress := NewIngress( log.With(d.logger, "role", "ingress"), @@ -732,7 +733,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { return d.client.CoreV1().Nodes().Watch(ctx, options) }, } - return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod) + return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled) } func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { @@ -747,13 +748,13 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde } } - return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers) } func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if !d.attachMetadata.Node { - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { @@ -773,13 +774,13 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share return nodes, nil } - return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers) } func (d 
*Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if !d.attachMetadata.Node { - cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncPeriod, indexers) + cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers) } indexers[nodeIndex] = func(obj interface{}) ([]string, error) { @@ -806,7 +807,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object return nodes, nil } - return cache.NewSharedIndexInformer(plw, object, resyncPeriod, indexers) + return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers) } func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go index 2e3687a0645a..396720c223ca 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go @@ -183,6 +183,7 @@ const ( podNameLabel = metaLabelPrefix + "pod_name" podIPLabel = metaLabelPrefix + "pod_ip" podContainerNameLabel = metaLabelPrefix + "pod_container_name" + podContainerIDLabel = metaLabelPrefix + "pod_container_id" podContainerImageLabel = metaLabelPrefix + "pod_container_image" podContainerPortNameLabel = metaLabelPrefix + "pod_container_port_name" podContainerPortNumberLabel = metaLabelPrefix + "pod_container_port_number" @@ -248,6 +249,24 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { return ls } +func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containerName string) (*apiv1.ContainerStatus, error) { + for _, s := range *statuses { + if s.Name == containerName { + return &s, nil + } + } + return nil, fmt.Errorf("cannot find container with name %v", containerName) +} + +func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { + cStatus, err := p.findPodContainerStatus(statuses, containerName) + if err != nil { + level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + return "" + } + return cStatus.ContainerID +} + func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg := &targetgroup.Group{ Source: podSource(pod), @@ -267,6 +286,12 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { for i, c := range containers { isInit := i >= len(pod.Spec.Containers) + cStatuses := &pod.Status.ContainerStatuses + if isInit { + cStatuses = &pod.Status.InitContainerStatuses + } + cID := p.findPodContainerID(cStatuses, c.Name) + // If no ports are defined for the container, create an anonymous // target per container. 
if len(c.Ports) == 0 { @@ -275,6 +300,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(pod.Status.PodIP), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerIsInit: lv(strconv.FormatBool(isInit)), }) @@ -288,6 +314,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { tg.Targets = append(tg.Targets, model.LabelSet{ model.AddressLabel: lv(addr), podContainerNameLabel: lv(c.Name), + podContainerIDLabel: lv(cID), podContainerImageLabel: lv(c.Image), podContainerPortNumberLabel: lv(ports), podContainerPortNameLabel: lv(port.Name), diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index b7357fa6cde3..8b304a0faf8e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -428,11 +428,11 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ), + Logger: log.With(m.logger, "discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ) + level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index d75afd10ed21..256679a8c96b 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -27,6 +27,8 @@ import ( // used to represent a histogram with integer counts and thus serves as a more // generalized representation. type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint // Currently valid schema numbers are -4 <= n <= 8. They are all for // base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. Or @@ -244,6 +246,37 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { return h } +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. +func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + + if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + return true +} + // addBucket takes the "coordinates" of the last bucket that was handled and // adds the provided bucket after it. If a corresponding bucket exists, the // count is added. 
If not, the bucket is inserted. The updated slices and the diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index c62be0b08cf7..e1de5ffb52da 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -25,14 +25,14 @@ type BucketCount interface { float64 | uint64 } -// internalBucketCount is used internally by Histogram and FloatHistogram. The +// InternalBucketCount is used internally by Histogram and FloatHistogram. The // difference to the BucketCount above is that Histogram internally uses deltas // between buckets rather than absolute counts (while FloatHistogram uses // absolute counts directly). Go type parameters don't allow type // specialization. Therefore, where special treatment of deltas between buckets // vs. absolute counts is important, this information has to be provided as a // separate boolean parameter "deltaBuckets" -type internalBucketCount interface { +type InternalBucketCount interface { float64 | int64 } @@ -86,7 +86,7 @@ type BucketIterator[BC BucketCount] interface { // implementations, together with an implementation of the At method. This // iterator can be embedded in full implementations of BucketIterator to save on // code replication. -type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct { +type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { schema int32 spans []Span buckets []IBC @@ -121,7 +121,7 @@ func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { // compactBuckets is a generic function used by both Histogram.Compact and // FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are // deltas. Set it to false if the buckets contain absolute counts. -func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { // Fast path: If there are no empty buckets AND no offset in any span is // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return // immediately. We check that first because it's cheap and presumably @@ -327,6 +327,18 @@ func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmp return buckets, spans } +func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if b != b2[i] { + return false + } + } + return true +} + func getBound(idx, schema int32) float64 { // Here a bit of context about the behavior for the last bucket counting // regular numbers (called simply "last bucket" below) and the bucket diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go index 934c4dde9c25..6d425307c555 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -19,6 +19,17 @@ import ( "strings" ) +// CounterResetHint contains the known information about a counter reset, +// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply. 
+type CounterResetHint byte + +const ( + UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not. + CounterReset // CounterReset means there was definitely a counter reset starting from this histogram. + NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram. + GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen. +) + // Histogram encodes a sparse, high-resolution histogram. See the design // document for full details: // https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# @@ -35,6 +46,8 @@ import ( // // Which bucket indices are actually used is determined by the spans. type Histogram struct { + // Counter reset information. + CounterResetHint CounterResetHint // Currently valid schema numbers are -4 <= n <= 8. They are all for // base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. Or @@ -250,18 +263,6 @@ func allEmptySpans(s []Span) bool { return true } -func bucketsMatch(b1, b2 []int64) bool { - if len(b1) != len(b2) { - return false - } - for i, b := range b1 { - if b != b2[i] { - return false - } - } - return true -} - // Compact works like FloatHistogram.Compact. See there for detailed // explanations. func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { @@ -307,15 +308,16 @@ func (h *Histogram) ToFloat() *FloatHistogram { } return &FloatHistogram{ - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: float64(h.ZeroCount), - Count: float64(h.Count), - Sum: h.Sum, - PositiveSpans: positiveSpans, - NegativeSpans: negativeSpans, - PositiveBuckets: positiveBuckets, - NegativeBuckets: negativeBuckets, + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.ZeroCount), + Count: float64(h.Count), + Sum: h.Sum, + PositiveSpans: positiveSpans, + NegativeSpans: negativeSpans, + PositiveBuckets: positiveBuckets, + NegativeBuckets: negativeBuckets, } } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index aafba218aa90..36a0e6cb3587 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -357,9 +357,7 @@ func EmptyLabels() Labels { // The caller has to guarantee that all label names are unique. func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) - for _, l := range ls { - set = append(set, l) - } + set = append(set, ls...) sort.Sort(set) return set @@ -414,6 +412,49 @@ func Compare(a, b Labels) int { return len(a) - len(b) } +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + (*ls) = append((*ls)[:0], b...) +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. 
+func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + // Builder allows modifying Labels. type Builder struct { base Labels @@ -470,7 +511,7 @@ Outer: return b } -// Set the name/value pair as a label. +// Set the name/value pair as a label. A value of "" means delete that label. func (b *Builder) Set(n, v string) *Builder { if v == "" { // Empty labels are the same as missing labels. @@ -525,3 +566,40 @@ Outer: } return res } + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + sort.Sort(b.add) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. +} + +// Return the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]Label{}, b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go index a683588d1667..05b8168825b2 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "os" - "sort" "strings" ) @@ -51,13 +50,14 @@ func ReadLabels(fn string, n int) ([]Labels, error) { defer f.Close() scanner := bufio.NewScanner(f) + b := ScratchBuilder{} var mets []Labels hashes := map[uint64]struct{}{} i := 0 for scanner.Scan() && i < n { - m := make(Labels, 0, 10) + b.Reset() r := strings.NewReplacer("\"", "", "{", "", "}", "") s := r.Replace(scanner.Text()) @@ -65,10 +65,11 @@ func ReadLabels(fn string, n int) ([]Labels, error) { labelChunks := strings.Split(s, ",") for _, labelChunk := range labelChunks { split := strings.Split(labelChunk, ":") - m = append(m, Label{Name: split[0], Value: split[1]}) + b.Add(split[0], split[1]) } // Order of the k/v labels matters, don't assume we'll always receive them already sorted.
- sort.Sort(m) + b.Sort() + m := b.Labels() h := m.Hash() if _, ok := hashes[h]; ok { diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index c731f6e0d3b3..0cc6eeeb7ed2 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -203,20 +203,20 @@ func (re Regexp) String() string { // Process returns a relabeled copy of the given label set. The relabel configurations // are applied in order of input. -// If a label set is dropped, nil is returned. +// If a label set is dropped, EmptyLabels and false is returned. // May return the input labelSet modified. -func Process(lbls labels.Labels, cfgs ...*Config) labels.Labels { - lb := labels.NewBuilder(nil) +func Process(lbls labels.Labels, cfgs ...*Config) (ret labels.Labels, keep bool) { + lb := labels.NewBuilder(labels.EmptyLabels()) for _, cfg := range cfgs { - lbls = relabel(lbls, cfg, lb) - if lbls == nil { - return nil + lbls, keep = relabel(lbls, cfg, lb) + if !keep { + return labels.EmptyLabels(), false } } - return lbls + return lbls, true } -func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels { +func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.Labels, keep bool) { var va [16]string values := va[:0] if len(cfg.SourceLabels) > cap(values) { @@ -232,19 +232,19 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels switch cfg.Action { case Drop: if cfg.Regex.MatchString(val) { - return nil + return labels.EmptyLabels(), false } case Keep: if !cfg.Regex.MatchString(val) { - return nil + return labels.EmptyLabels(), false } case DropEqual: if lset.Get(cfg.TargetLabel) == val { - return nil + return labels.EmptyLabels(), false } case KeepEqual: if lset.Get(cfg.TargetLabel) != val { - return nil + return labels.EmptyLabels(), false } case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) @@ -271,29 +271,29 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) labels.Labels mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) case LabelMap: - for _, l := range lset { + lset.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) lb.Set(res, l.Value) } - } + }) case LabelDrop: - for _, l := range lset { + lset.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) case LabelKeep: - for _, l := range lset { + lset.Range(func(l labels.Label) { if !cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } - } + }) default: panic(fmt.Errorf("relabel: unknown relabel action type %q", cfg.Action)) } - return lb.Labels(lset) + return lb.Labels(lset), true } // sum64 sums the md5 hash to an uint64. diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index f1d5f39257e6..30b3face0de8 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -143,22 +143,24 @@ type RuleGroup struct { // Rule describes an alerting or recording rule. 
type Rule struct { - Record string `yaml:"record,omitempty"` - Alert string `yaml:"alert,omitempty"` - Expr string `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record string `yaml:"record,omitempty"` + Alert string `yaml:"alert,omitempty"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules. type RuleNode struct { - Record yaml.Node `yaml:"record,omitempty"` - Alert yaml.Node `yaml:"alert,omitempty"` - Expr yaml.Node `yaml:"expr"` - For model.Duration `yaml:"for,omitempty"` - Labels map[string]string `yaml:"labels,omitempty"` - Annotations map[string]string `yaml:"annotations,omitempty"` + Record yaml.Node `yaml:"record,omitempty"` + Alert yaml.Node `yaml:"alert,omitempty"` + Expr yaml.Node `yaml:"expr"` + For model.Duration `yaml:"for,omitempty"` + KeepFiringFor model.Duration `yaml:"keep_firing_for,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` + Annotations map[string]string `yaml:"annotations,omitempty"` } // Validate the rule and return a list of encountered errors. @@ -208,6 +210,12 @@ func (r *RuleNode) Validate() (nodes []WrappedError) { node: &r.Record, }) } + if r.KeepFiringFor != 0 { + nodes = append(nodes, WrappedError{ + err: fmt.Errorf("invalid field 'keep_firing_for' in recording rule"), + node: &r.Record, + }) + } if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) { nodes = append(nodes, WrappedError{ err: fmt.Errorf("invalid recording rule name: %s", r.Record.Value), diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 932a3d96db14..15a95a959249 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math" - "sort" "strings" "unicode/utf8" @@ -82,6 +81,7 @@ func (l *openMetricsLexer) Error(es string) { // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { l *openMetricsLexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -113,8 +113,8 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } -// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support -// sparse histograms. +// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not +// support sparse histograms yet. func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -158,14 +158,11 @@ func (p *OpenMetricsParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *OpenMetricsParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. 
s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -173,16 +170,16 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -204,17 +201,18 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { e.Ts = p.exemplarTs } + p.builder.Reset() for i := 0; i < len(p.eOffsets); i += 4 { a := p.eOffsets[i] - p.start b := p.eOffsets[i+1] - p.start c := p.eOffsets[i+2] - p.start d := p.eOffsets[i+3] - p.start - e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], s[c:d]) } - // Sort the labels. - sort.Sort(e.Labels) + p.builder.Sort() + e.Labels = p.builder.Labels() return true } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index a3bb8bb9bfdc..b0c963392d24 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -21,7 +21,6 @@ import ( "fmt" "io" "math" - "sort" "strconv" "strings" "unicode/utf8" @@ -144,6 +143,7 @@ func (l *promlexer) Error(es string) { // Prometheus text exposition format. type PromParser struct { l *promlexer + builder labels.ScratchBuilder series []byte text []byte mtype MetricType @@ -168,8 +168,8 @@ func (p *PromParser) Series() ([]byte, *int64, float64) { return p.series, nil, p.val } -// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format -// does not support sparse histograms. +// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text +// format does not support sparse histograms yet. func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { return nil, nil, nil, nil } @@ -212,14 +212,11 @@ func (p *PromParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *PromParser) Metric(l *labels.Labels) string { - // Allocate the full immutable string immediately, so we just - // have to create references on it below. + // Copy the buffer to a string: this is only necessary for the return value. s := string(p.series) - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: s[:p.offsets[0]-p.start], - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, s[:p.offsets[0]-p.start]) for i := 1; i < len(p.offsets); i += 4 { a := p.offsets[i] - p.start @@ -227,16 +224,16 @@ func (p *PromParser) Metric(l *labels.Labels) string { c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start + value := s[c:d] // Replacer causes allocations. Replace only when necessary. 
if strings.IndexByte(s[c:d], byte('\\')) >= 0 { - *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])}) - continue + value = lvalReplacer.Replace(value) } - *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]}) + p.builder.Add(s[a:b], value) } - // Sort labels to maintain the sorted labels invariant. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return s } @@ -343,7 +340,7 @@ func (p *PromParser) Next() (Entry, error) { t2 = p.nextToken() } if t2 != tValue { - return EntryInvalid, parseError("expected value after metric", t) + return EntryInvalid, parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { return EntryInvalid, err @@ -353,7 +350,7 @@ func (p *PromParser) Next() (Entry, error) { p.val = math.Float64frombits(value.NormalNaN) } p.hasTS = false - switch p.nextToken() { + switch t := p.nextToken(); t { case tLinebreak: break case tTimestamp: @@ -362,7 +359,7 @@ func (p *PromParser) Next() (Entry, error) { return EntryInvalid, err } if t2 := p.nextToken(); t2 != tLinebreak { - return EntryInvalid, parseError("expected next entry after timestamp", t) + return EntryInvalid, parseError("expected next entry after timestamp", t2) } default: return EntryInvalid, parseError("expected timestamp or new record", t) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index a9c940879eb3..eca145955e86 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -19,7 +19,6 @@ import ( "fmt" "io" "math" - "sort" "strings" "unicode/utf8" @@ -59,6 +58,8 @@ type ProtobufParser struct { // that we have to decode the next MetricFamily. state Entry + builder labels.ScratchBuilder // held here to reduce allocations when building Labels + mf *dto.MetricFamily // The following are just shenanigans to satisfy the Parser interface. @@ -104,7 +105,7 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { default: v = s.GetQuantile()[p.fieldPos].GetValue() } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // This should only happen for a legacy histogram. 
h := m.GetHistogram() switch p.fieldPos { @@ -169,6 +170,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his fh.NegativeSpans[i].Offset = span.GetOffset() fh.NegativeSpans[i].Length = span.GetLength() } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } fh.Compact(0) if ts != 0 { return p.metricBytes.Bytes(), &ts, nil, &fh @@ -198,6 +202,9 @@ func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *his sh.NegativeSpans[i].Offset = span.GetOffset() sh.NegativeSpans[i].Length = span.GetLength() } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } sh.Compact(0) if ts != 0 { return p.metricBytes.Bytes(), &ts, &sh, nil @@ -224,6 +231,8 @@ func (p *ProtobufParser) Type() ([]byte, MetricType) { return n, MetricTypeGauge case dto.MetricType_HISTOGRAM: return n, MetricTypeHistogram + case dto.MetricType_GAUGE_HISTOGRAM: + return n, MetricTypeGaugeHistogram case dto.MetricType_SUMMARY: return n, MetricTypeSummary } @@ -245,23 +254,19 @@ func (p *ProtobufParser) Comment() []byte { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. func (p *ProtobufParser) Metric(l *labels.Labels) string { - *l = append(*l, labels.Label{ - Name: labels.MetricName, - Value: p.getMagicName(), - }) + p.builder.Reset() + p.builder.Add(labels.MetricName, p.getMagicName()) for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() { - *l = append(*l, labels.Label{ - Name: lp.GetName(), - Value: lp.GetValue(), - }) + p.builder.Add(lp.GetName(), lp.GetValue()) } if needed, name, value := p.getMagicLabel(); needed { - *l = append(*l, labels.Label{Name: name, Value: value}) + p.builder.Add(name, value) } // Sort labels to maintain the sorted labels invariant. - sort.Sort(*l) + p.builder.Sort() + *l = p.builder.Labels() return p.metricBytes.String() } @@ -276,7 +281,7 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { switch p.mf.GetType() { case dto.MetricType_COUNTER: exProto = m.GetCounter().GetExemplar() - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := m.GetHistogram().GetBucket() if p.fieldPos < 0 { if p.state == EntrySeries { @@ -305,12 +310,12 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { ex.HasTs = true ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000) } + p.builder.Reset() for _, lp := range exProto.GetLabel() { - ex.Labels = append(ex.Labels, labels.Label{ - Name: lp.GetName(), - Value: lp.GetValue(), - }) + p.builder.Add(lp.GetName(), lp.GetValue()) } + p.builder.Sort() + ex.Labels = p.builder.Labels() return true } @@ -334,7 +339,7 @@ func (p *ProtobufParser) Next() (Entry, error) { } // We are at the beginning of a metric family. Put only the name - // into metricBytes and validate only name and help for now. + // into metricBytes and validate only name, help, and type for now. 
name := p.mf.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { return EntryInvalid, errors.Errorf("invalid metric name: %s", name) @@ -342,6 +347,17 @@ func (p *ProtobufParser) Next() (Entry, error) { if help := p.mf.GetHelp(); !utf8.ValidString(help) { return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) } + switch p.mf.GetType() { + case dto.MetricType_COUNTER, + dto.MetricType_GAUGE, + dto.MetricType_HISTOGRAM, + dto.MetricType_GAUGE_HISTOGRAM, + dto.MetricType_SUMMARY, + dto.MetricType_UNTYPED: + // All good. + default: + return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + } p.metricBytes.Reset() p.metricBytes.WriteString(name) @@ -349,7 +365,8 @@ func (p *ProtobufParser) Next() (Entry, error) { case EntryHelp: p.state = EntryType case EntryType: - if p.mf.GetType() == dto.MetricType_HISTOGRAM && + t := p.mf.GetType() + if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) && isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) { p.state = EntryHistogram } else { @@ -359,8 +376,11 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } case EntryHistogram, EntrySeries: + t := p.mf.GetType() if p.state == EntrySeries && !p.fieldsDone && - (p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) { + (t == dto.MetricType_SUMMARY || + t == dto.MetricType_HISTOGRAM || + t == dto.MetricType_GAUGE_HISTOGRAM) { p.fieldPos++ } else { p.metricPos++ @@ -421,7 +441,7 @@ func (p *ProtobufParser) getMagicName() string { if p.fieldPos == -1 { return p.mf.GetName() + "_sum" } - if t == dto.MetricType_HISTOGRAM { + if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM { return p.mf.GetName() + "_bucket" } return p.mf.GetName() @@ -439,7 +459,7 @@ func (p *ProtobufParser) getMagicLabel() (bool, string, string) { q := qq[p.fieldPos] p.fieldsDone = p.fieldPos == len(qq)-1 return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile()) - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket() if p.fieldPos >= len(bb) { p.fieldsDone = true diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index fd89a029c7d0..79697d079668 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -353,11 +353,11 @@ func (n *Manager) Send(alerts ...*Alert) { for _, a := range alerts { lb := labels.NewBuilder(a.Labels) - for _, l := range n.opts.ExternalLabels { + n.opts.ExternalLabels.Range(func(l labels.Label) { if a.Labels.Get(l.Name) == "" { lb.Set(l.Name, l.Value) } - } + }) a.Labels = lb.Labels(a.Labels) } @@ -394,8 +394,8 @@ func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert { var relabeledAlerts []*Alert for _, alert := range alerts { - labels := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) - if labels != nil { + labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...) 
+ if keep { alert.Labels = labels relabeledAlerts = append(relabeledAlerts, alert) } @@ -570,9 +570,9 @@ func alertsToOpenAPIAlerts(alerts []*Alert) models.PostableAlerts { func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet { apiLabelSet := models.LabelSet{} - for _, label := range modelLabelSet { + modelLabelSet.Range(func(label labels.Label) { apiLabelSet[label.Name] = label.Value - } + }) return apiLabelSet } @@ -719,9 +719,9 @@ func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig } } - lset := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) - if lset == nil { - droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{lbls}) + lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...) + if !keep { + droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)}) continue } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index a2b438489328..ddfb26b13fd7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -653,12 +653,13 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval query.sampleStats.InitStepTracking(start, start, 1) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() - var mat Matrix switch result := val.(type) { @@ -704,10 +705,12 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(s.Expr) + + evalSpanTimer.Finish() + if err != nil { return nil, warnings, err } - evalSpanTimer.Finish() mat, ok := val.(Matrix) if !ok { @@ -1029,6 +1032,14 @@ type EvalNodeHelper struct { resultMetric map[string]labels.Labels } +func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { + if enh.lb == nil { + enh.lb = labels.NewBuilder(lbls) + } else { + enh.lb.Reset(lbls) + } +} + // DropMetricName is a cached version of DropMetricName. func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels { if enh.Dmn == nil { @@ -1390,10 +1401,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) + var chkIter chunkenc.Iterator for i, s := range selVS.Series { ev.currentSamples -= len(points) points = points[:0] - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) metric := selVS.Series[i].Labels() // The last_over_time function acts like offset; thus, it // should keep the metric name. 
For all the other range @@ -1562,7 +1575,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.NumberLiteral: return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) { - return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil + return append(enh.Out, Sample{Point: Point{V: e.Val}, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: @@ -1575,8 +1588,10 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } mat := make(Matrix, 0, len(e.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range e.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: e.Series[i].Labels(), Points: getPointSlice(numSteps), @@ -1720,8 +1735,10 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect } vec := make(Vector, 0, len(node.Series)) it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator for i, s := range node.Series { - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) t, v, h, ok := ev.vectorSelectorSingle(it, node, ts) if ok { @@ -1809,12 +1826,14 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, storag ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } + var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { if err := contextDone(ev.ctx, "expression evaluation"); err != nil { ev.error(err) } - it.Reset(s.Iterator()) + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) ss := Series{ Metric: series[i].Labels(), } @@ -2141,12 +2160,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V enh.resultMetric = make(map[string]labels.Labels, len(enh.Out)) } - if enh.lb == nil { - enh.lb = labels.NewBuilder(lhs) - } else { - enh.lb.Reset(lhs) - } - + enh.resetBuilder(lhs) buf := bytes.NewBuffer(enh.lblResultBuf[:0]) enh.lblBuf = lhs.Bytes(enh.lblBuf) buf.Write(enh.lblBuf) @@ -2179,7 +2193,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } } - ret := enh.lb.Labels(nil) + ret := enh.lb.Labels(labels.EmptyLabels()) enh.resultMetric[str] = ret return ret } @@ -2219,7 +2233,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala } func dropMetricName(l labels.Labels) labels.Labels { - return labels.NewBuilder(l).Del(labels.MetricName).Labels(nil) + return labels.NewBuilder(l).Del(labels.MetricName).Labels(labels.EmptyLabels()) } // scalarBinop evaluates a binary operation between two Scalars. @@ -2346,15 +2360,14 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without } } - lb := labels.NewBuilder(nil) var buf []byte for si, s := range vec { metric := s.Metric if op == parser.COUNT_VALUES { - lb.Reset(metric) - lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) - metric = lb.Labels(nil) + enh.resetBuilder(metric) + enh.lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + metric = enh.lb.Labels(labels.EmptyLabels()) // We've changed the metric so we have to recompute the grouping key. recomputeGroupingKey = true @@ -2371,14 +2384,18 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without group, ok := result[groupingKey] // Add a new group if it doesn't exist. 
if !ok { - lb.Reset(metric) + var m labels.Labels + enh.resetBuilder(metric) if without { - lb.Del(grouping...) - lb.Del(labels.MetricName) + enh.lb.Del(grouping...) + enh.lb.Del(labels.MetricName) + m = enh.lb.Labels(labels.EmptyLabels()) + } else if len(grouping) > 0 { + enh.lb.Keep(grouping...) + m = enh.lb.Labels(labels.EmptyLabels()) } else { - lb.Keep(grouping...) + m = labels.EmptyLabels() } - m := lb.Labels(nil) newAgg := &groupedAggregation{ labels: m, value: s.V, diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index d481cb7358b4..c5922002b0e7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -957,7 +957,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev if !ok { sample.Metric = labels.NewBuilder(sample.Metric). Del(excludedLabels...). - Labels(nil) + Labels(labels.EmptyLabels()) mb = &metricWithBuckets{sample.Metric, nil} enh.signatureToMetricWithBuckets[string(enh.lblBuf)] = mb @@ -1077,7 +1077,7 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod if len(res) > 0 { lb.Set(dst, string(res)) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } } @@ -1145,7 +1145,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe lb.Set(dst, strval) } - outMetric = lb.Labels(nil) + outMetric = lb.Labels(labels.EmptyLabels()) enh.Dmn[h] = outMetric } @@ -1383,7 +1383,7 @@ func (s *vectorByReverseValueHeap) Pop() interface{} { // createLabelsForAbsentFunction returns the labels that are uniquely and exactly matched // in a given expression. It is used in the absent functions. func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { - m := labels.Labels{} + b := labels.NewBuilder(labels.EmptyLabels()) var lm []*labels.Matcher switch n := expr.(type) { @@ -1392,25 +1392,26 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { case *parser.MatrixSelector: lm = n.VectorSelector.(*parser.VectorSelector).LabelMatchers default: - return m + return labels.EmptyLabels() } - empty := []string{} + // The 'has' map implements backwards-compatibility for historic behaviour: + // e.g. in `absent(x{job="a",job="b",foo="bar"})` then `job` is removed from the output. + // Note this gives arguably wrong behaviour for `absent(x{job="a",job="a",foo="bar"})`. 
+ has := make(map[string]bool, len(lm)) for _, ma := range lm { if ma.Name == labels.MetricName { continue } - if ma.Type == labels.MatchEqual && !m.Has(ma.Name) { - m = labels.NewBuilder(m).Set(ma.Name, ma.Value).Labels(nil) + if ma.Type == labels.MatchEqual && !has[ma.Name] { + b.Set(ma.Name, ma.Value) + has[ma.Name] = true } else { - empty = append(empty, ma.Name) + b.Del(ma.Name) } } - for _, v := range empty { - m = labels.NewBuilder(m).Del(v).Labels(nil) - } - return m + return b.Labels(labels.EmptyLabels()) } func stringFromArg(e parser.Expr) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index 433f45259c96..461e854ac1a0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -16,13 +16,13 @@ package parser import ( "math" - "sort" "strconv" "time" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" ) + %} %union { @@ -32,6 +32,7 @@ import ( matcher *labels.Matcher label labels.Label labels labels.Labels + lblList []labels.Label strings []string series []SequenceValue uint uint64 @@ -138,10 +139,9 @@ START_METRIC_SELECTOR // Type definitions for grammar rules. %type label_match_list %type label_matcher - %type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors - -%type label_set label_set_list metric +%type label_set metric +%type label_set_list %type