diff --git a/.github/workflows-doc.md b/.github/workflows-doc.md index 5d245f99a32..a4f406a9627 100644 --- a/.github/workflows-doc.md +++ b/.github/workflows-doc.md @@ -69,7 +69,7 @@ As of October 2020, GitHub Actions do not persist between different jobs in the - name: Compressing Images run: tar -zcvf images.tar.gz /tmp/images - name: Cache Images - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: Docker Images path: ./images.tar.gz diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-image.yml new file mode 100644 index 00000000000..3eb6312a18a --- /dev/null +++ b/.github/workflows/build-image.yml @@ -0,0 +1,68 @@ +name: Build Image + +on: + push: + branches: [ master ] + paths: + - 'build-image/**' + - '.github/workflows/build-image.yml' + pull_request: + branches: [ master ] + paths: + - 'build-image/**' + - '.github/workflows/build-image.yml' + +jobs: + build: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + name: Checkout + with: + fetch-depth: 0 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Save image + run: make save-multiarch-build-image + + - name: Upload Docker Images Artifacts + uses: actions/upload-artifact@v4 + with: + name: build-image + path: | + ./build-image-amd64.tar + ./build-image-arm64.tar + if-no-files-found: error + + push: + needs: build + if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + name: Checkout + with: + fetch-depth: 0 + + - name: Download Docker Images Artifacts + uses: actions/download-artifact@v4 + with: + name: build-image + + - name: Load image + run: make load-multiarch-build-image + + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{secrets.QUAY_REGISTRY_USER}} + password: ${{secrets.QUAY_REGISTRY_PASSWORD}} + + - name: Push image + run: make push-multiarch-build-image diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index dfa752c5965..1e4fc24bc8b 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -5,13 +5,19 @@ on: branches: [master] tags: - v[0-9]+.[0-9]+.[0-9]+** # Tag filters not as strict due to different regex system on Github Actions + paths-ignore: + - 'build-image/**' + - '.github/workflows/build-image.yml' pull_request: + paths-ignore: + - 'build-image/**' + - '.github/workflows/build-image.yml' jobs: lint: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680 + image: quay.io/cortexproject/build-image:master-2866917df steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -40,7 +46,7 @@ jobs: test: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680 + image: quay.io/cortexproject/build-image:master-2866917df steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -83,7 +89,7 @@ jobs: build: runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680 + image: quay.io/cortexproject/build-image:master-2866917df steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -107,7 +113,7 @@ jobs: touch build-image/.uptodate make BUILD_IN_CONTAINER=false web-build - name: Upload Website Artifact - uses: actions/upload-artifact@v2 + uses: 
actions/upload-artifact@v4 with: name: website public path: website/public/ @@ -119,7 +125,7 @@ jobs: - name: Create Docker Images Archive run: tar -cvf images.tar /tmp/images - name: Upload Docker Images Artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: Docker Images path: ./images.tar @@ -142,7 +148,7 @@ jobs: - name: Upgrade golang uses: actions/setup-go@v2 with: - go-version: 1.21.9 + go-version: 1.21.11 - name: Checkout Repo uses: actions/checkout@v2 - name: Install Docker Client @@ -152,7 +158,7 @@ jobs: sudo mkdir -p /go/src/github.com/cortexproject/cortex sudo ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex - name: Download Docker Images Artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: Docker Images - name: Extract Docker Images Archive @@ -207,7 +213,7 @@ jobs: - name: Install Docker Client run: sudo ./.github/workflows/scripts/install-docker.sh - name: Download Docker Images Artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: Docker Images - name: Extract Docker Images Archive @@ -217,14 +223,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:node_version_upgrade-60582e680 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:master-2866917df TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680 + image: quay.io/cortexproject/build-image:master-2866917df steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -241,7 +247,7 @@ jobs: mkdir -p /go/src/github.com/cortexproject/cortex ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex - name: Download Website Artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: website public path: website/public @@ -266,7 +272,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-20.04 container: - image: quay.io/cortexproject/build-image:node_version_upgrade-60582e680 + image: quay.io/cortexproject/build-image:master-2866917df steps: - name: Checkout Repo uses: actions/checkout@v2 @@ -282,7 +288,7 @@ jobs: mkdir -p /go/src/github.com/cortexproject/cortex ln -s $GITHUB_WORKSPACE/* /go/src/github.com/cortexproject/cortex - name: Download Docker Images Artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: Docker Images - name: Extract Docker Images Archive diff --git a/.gitignore b/.gitignore index ac025027c16..57492352860 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,6 @@ Makefile.local .vscode compose compose-simple + +/build-image-arm64.tar +/build-image-amd64.tar diff --git a/CHANGELOG.md b/CHANGELOG.md index 59ecd9b8d47..fc2b5cd5e5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,8 +15,11 @@ * [ENHANCEMENT] KV: Etcd Added etcd.ping-without-stream-allowed parameter to disable/enable PermitWithoutStream #5933 * [ENHANCEMENT] Ingester: Add a new `limits_per_label_set` limit. This limit functions similarly to `max_series_per_metric`, but allowing users to define the maximum number of series per LabelSet. 
#5950 #5993 * [ENHANCEMENT] Store Gateway: Log gRPC requests together with headers configured in `http_request_headers_to_log`. #5958 +* [ENHANCEMENT] Upgrade Alpine to 3.19. #6014 +* [ENHANCEMENT] Upgrade Go to 1.21.11. #6014 * [BUGFIX] Configsdb: Fix endline issue in db password. #5920 * [BUGFIX] Ingester: Fix `user` and `type` labels for the `cortex_ingester_tsdb_head_samples_appended_total` TSDB metric. #5952 +* [BUGFIX] Querier: Enforce the max query length check for the `/api/v1/series` API even if `ignoreMaxQueryLength` is set to true. #6018 ## 1.17.1 2024-05-20 diff --git a/MAINTAINERS b/MAINTAINERS index c2faab7aef7..0a30c496bee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,4 +1,5 @@ Alan Protasio, Amazon Web Services (@alanprot) Alvin Lin, Amazon Web Services (@alvinlin123) Ben Ye, Amazon Web Services (@yeya24) +Charlie Le, Apple (@CharlieTLe) Friedrich Gonzalez, Adobe (@friedrichg) diff --git a/Makefile b/Makefile index 7c0c10a337d..b7be6c01e5c 100644 --- a/Makefile +++ b/Makefile @@ -53,14 +53,7 @@ fetch-build-image: docker tag $(BUILD_IMAGE):$(LATEST_BUILD_IMAGE_TAG) $(BUILD_IMAGE):latest touch build-image/.uptodate -push-multiarch-build-image: - @echo - # Build image for each platform separately... it tends to generate fewer errors. - $(SUDO) docker buildx build --platform linux/amd64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) build-image/ - $(SUDO) docker buildx build --platform linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) build-image/ - # This command will run the same build as above, but it will reuse existing platform-specific images, - # put them together and push to registry. - $(SUDO) docker buildx build -o type=registry --platform linux/amd64,linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG) build-image/ +-include build-image/Makefile # We don't want find to scan inside a bunch of directories, to accelerate the # 'make: Entering directory '/go/src/github.com/cortexproject/cortex' phase. @@ -122,7 +115,7 @@ build-image/$(UPTODATE): build-image/* SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER := true BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image -LATEST_BUILD_IMAGE_TAG ?= node_version_upgrade-60582e680 +LATEST_BUILD_IMAGE_TAG ?= master-2866917df # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden diff --git a/build-image/Dockerfile b/build-image/Dockerfile index d02887a345c..4a1f9079c0b 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.9-bullseye +FROM golang:1.21.11-bullseye ARG goproxyValue ENV GOPROXY=${goproxyValue} RUN apt-get update && apt-get install -y curl file jq unzip protobuf-compiler libprotobuf-dev && \ diff --git a/build-image/Makefile b/build-image/Makefile new file mode 100644 index 00000000000..9844b681ab2 --- /dev/null +++ b/build-image/Makefile @@ -0,0 +1,17 @@ +save-multiarch-build-image: + @echo + # Build image for each platform separately... it tends to generate fewer errors. 
+ $(SUDO) docker buildx build --platform linux/amd64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG)-amd64 --output type=docker,dest=./build-image-amd64.tar build-image/ + $(SUDO) docker buildx build --platform linux/arm64 --build-arg=revision=$(GIT_REVISION) --build-arg=goproxyValue=$(GOPROXY_VALUE) -t $(IMAGE_PREFIX)build-image:$(IMAGE_TAG)-arm64 --output type=docker,dest=./build-image-arm64.tar build-image/ + +load-multiarch-build-image: + $(SUDO) docker load -i build-image-amd64.tar + $(SUDO) docker load -i build-image-arm64.tar + +push-multiarch-build-image: + # This command will run the same build as multiarch-build-image, but it will reuse existing platform-specific images, + # put them together and push to registry. + $(SUDO) docker push $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-amd64 + $(SUDO) docker push $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-arm64 + $(SUDO) docker manifest create $(IMAGE_PREFIX)build-image:$(IMAGE_TAG) --amend $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-amd64 --amend $(IMAGE_PREFIX)build-image:${IMAGE_TAG}-arm64 + $(SUDO) docker manifest push $(IMAGE_PREFIX)build-image:$(IMAGE_TAG) diff --git a/cmd/cortex/Dockerfile b/cmd/cortex/Dockerfile index 28a315d5b99..64fadb8840c 100644 --- a/cmd/cortex/Dockerfile +++ b/cmd/cortex/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 ARG TARGETARCH RUN apk add --no-cache ca-certificates diff --git a/cmd/query-tee/Dockerfile b/cmd/query-tee/Dockerfile index cbd5f4558b8..4b3d9f5c3ce 100644 --- a/cmd/query-tee/Dockerfile +++ b/cmd/query-tee/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 ARG TARGETARCH RUN apk add --no-cache ca-certificates diff --git a/cmd/test-exporter/Dockerfile b/cmd/test-exporter/Dockerfile index 86460a0600f..8016612157a 100644 --- a/cmd/test-exporter/Dockerfile +++ b/cmd/test-exporter/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 ARG TARGETARCH RUN apk add --no-cache ca-certificates COPY test-exporter-$TARGETARCH /test-exporter diff --git a/cmd/thanosconvert/Dockerfile b/cmd/thanosconvert/Dockerfile index 80b13418210..67f33267e14 100644 --- a/cmd/thanosconvert/Dockerfile +++ b/cmd/thanosconvert/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 ARG TARGETARCH RUN apk add --no-cache ca-certificates COPY thanosconvert-$TARGETARCH /thanosconvert diff --git a/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile b/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile index e1cf86cb2b8..f05d0c667e9 100644 --- a/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3-gossip/dev.dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18 +FROM golang:1.19 ENV CGO_ENABLED=0 RUN go get github.com/go-delve/delve/cmd/dlv diff --git a/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile b/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile index 71d0fc6df75..7ed184ff964 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3-single-binary/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 RUN mkdir /cortex WORKDIR /cortex diff --git a/development/tsdb-blocks-storage-s3/dev.dockerfile b/development/tsdb-blocks-storage-s3/dev.dockerfile index e1cf86cb2b8..698ed36ea2d 100644 --- a/development/tsdb-blocks-storage-s3/dev.dockerfile +++ b/development/tsdb-blocks-storage-s3/dev.dockerfile @@ -1,8 +1,8 @@ -FROM golang:1.18 +FROM golang:1.19 ENV CGO_ENABLED=0 RUN go get 
github.com/go-delve/delve/cmd/dlv -FROM alpine:3.18 +FROM alpine:3.19 RUN mkdir /cortex WORKDIR /cortex diff --git a/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile b/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile index 71d0fc6df75..7ed184ff964 100644 --- a/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile +++ b/development/tsdb-blocks-storage-swift-single-binary/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 RUN mkdir /cortex WORKDIR /cortex diff --git a/docs/api/_index.md b/docs/api/_index.md index 0c8a787927f..824a67736b2 100644 --- a/docs/api/_index.md +++ b/docs/api/_index.md @@ -212,6 +212,18 @@ _For more information, please check out Prometheus [Remote storage integrations] _Requires [authentication](#authentication)._ +### OTLP Receiver + +``` +POST /api/v1/otlp/v1/metrics +``` + +Entrypoint for the OTLP Receiver. + +This API endpoint accepts an HTTP POST request using the [OTLP](https://opentelemetry.io/docs/specs/otlp/) format. + +_Requires [authentication](#authentication)._ + ### Distributor ring status ``` diff --git a/docs/contributing/how-to-update-the-build-image.md b/docs/contributing/how-to-update-the-build-image.md index 497ab00a004..df0c62a2f10 100644 --- a/docs/contributing/how-to-update-the-build-image.md +++ b/docs/contributing/how-to-update-the-build-image.md @@ -5,12 +5,10 @@ weight: 5 slug: how-to-update-the-build-image --- -The build image currently can only be updated by a Cortex maintainer. If you're not a maintainer you can still open a PR with the changes, asking a maintainer to assist you publishing the updated image. The procedure is: +The procedure is: 1. Update `build-image/Dockerfile` -1. Run `go env` and make sure `GOPROXY=https://proxy.golang.org,direct` (Go's default). Some environment may required `GOPROXY=direct`, and if you push a build image with this, build workflow on GitHub will take a lot longer to download modules. -1. `docker login quay.io`. Note that pushing to `quay.io/cortexproject/build-image` repository can only be done by a maintainer. -1. Build the and publish the image by using `make push-multiarch-build-image`. This will build and push multi-platform docker image (for linux/amd64 and linux/arm64). Running this step successfully requires [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/), but does not require a specific platform. -1. Replace the image tag in `.github/workflows/*` (_there may be multiple references_) and Makefile (variable `LATEST_BUILD_IMAGE_TAG`). +1. Create a PR to master with that change. After the PR is merged to master, the new build image is available in the quay.io repository. Check the GitHub Actions logs [here](https://github.com/cortexproject/cortex/actions/workflows/build-image.yml) to find the image tag. +1. Create another PR to replace the image tag in `.github/workflows/*` (_there may be multiple references_) and Makefile (variable `LATEST_BUILD_IMAGE_TAG`). 1. If you are updating Go's runtime version be sure to change `actions/setup-go`'s `go-version` in ``.github/workflows/*`. 1. 
Open a PR and make sure the CI with new build-image passes diff --git a/docs/getting-started/.env b/docs/getting-started/.env index d96073c23f4..58aa8c3d918 100644 --- a/docs/getting-started/.env +++ b/docs/getting-started/.env @@ -1,4 +1,4 @@ -CORTEX_VERSION=v1.17.0 +CORTEX_VERSION=v1.17.1 GRAFANA_VERSION=10.4.2 PROMETHEUS_VERSION=v2.51.2 SEAWEEDFS_VERSION=3.67 \ No newline at end of file diff --git a/docs/getting-started/_index.md b/docs/getting-started/_index.md index 68cb830d008..54c8ede5c6f 100644 --- a/docs/getting-started/_index.md +++ b/docs/getting-started/_index.md @@ -57,25 +57,27 @@ how this is configured. ```sh # Create a bucket in SeaweedFS -curl -X PUT http://localhost:8333/cortex-bucket +curl --aws-sigv4 "aws:amz:local:seaweedfs" --user "any:any" -X PUT http://localhost:8333/cortex-bucket ``` -#### Configure Grafana +#### Explore -1. Log into the Grafana instance at [http://localhost:3000](http://localhost:3000) - * login credentials are `username: admin` and `password: admin` - * There may be an additional screen on setting a new password. This can be skipped and is optional -1. Navigate to the `Data Sources` page - * Look for a gear icon on the left sidebar and select `Data Sources` -1. Add a new Prometheus Data Source - * Use `http://cortex:9009/api/prom` as the URL - * Click `Save & Test` -1. Go to `Metrics Explore` to query metrics - * Look for a compass icon on the left sidebar - * Click `Metrics` for a dropdown list of all the available metrics +Grafana is configured to use Cortex as a data source. You can explore the data source in Grafana and query metrics. For example, this [explore](http://localhost:3000/explore?schemaVersion=1&panes=%7B%22au0%22:%7B%22datasource%22:%22P6693426190CB2316%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22expr%22:%22rate%28prometheus_remote_storage_samples_total%5B$__rate_interval%5D%29%22,%22range%22:true,%22instant%22:true,%22datasource%22:%7B%22type%22:%22prometheus%22,%22uid%22:%22P6693426190CB2316%22%7D,%22editorMode%22:%22builder%22,%22legendFormat%22:%22__auto%22,%22useBackend%22:false,%22disableTextWrap%22:false,%22fullMetaSearch%22:false,%22includeNullMetadata%22:false%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D%7D&orgId=1) page is showing the rate of samples being sent to Cortex. If everything is working correctly, then the metrics seen in Grafana were successfully sent from Prometheus to Cortex -via remote_write! +via `remote_write`! + +Other things to explore: + +- [Cortex](http://localhost:9009) - Administrative interface for Cortex + - Try shutting down the [ingester](http://localhost:9009/ingester/shutdown) and see how it affects metric ingestion. + - Restart Cortex to bring the ingester back online, and see how Prometheus catches up. + - Does it affect the querying of metrics in Grafana? +- [Prometheus](http://localhost:9090) - Prometheus instance that is sending metrics to Cortex + - Try querying the metrics in Prometheus. + - Are they the same as what you see in Cortex? +- [Grafana](http://localhost:3000) - Grafana instance that is visualizing the metrics. + - Try creating a new dashboard and adding a new panel with a query to Cortex. 
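You can also query Cortex's Prometheus-compatible API directly, without going through Grafana. A minimal sketch, assuming the docker-compose setup above (auth is disabled, so no `X-Scope-OrgID` header is needed, and `/api/prom` is the same prefix the Grafana data source uses):

```sh
# Ask Cortex for the same rate shown in the Grafana explore link above.
curl -G http://localhost:9009/api/prom/api/v1/query \
  --data-urlencode 'query=rate(prometheus_remote_storage_samples_total[5m])'
```

A non-empty `result` array in the JSON response confirms that samples are flowing end to end.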
### Clean up @@ -140,7 +142,7 @@ $ kubectl -n cortex port-forward svc/seaweedfs 8333 ```shell # Create a bucket -$ curl -X PUT http://localhost:8333/cortex-bucket +$ curl --aws-sigv4 "aws:amz:local:seaweedfs" --user "any:any" -X PUT http://localhost:8333/cortex-bucket ``` #### Setup Cortex @@ -148,47 +150,66 @@ $ curl -X PUT http://localhost:8333/cortex-bucket ```sh # Deploy Cortex using the provided values file which configures # - blocks storage to use the seaweedfs service -$ helm install --version=2.3.0 --namespace cortex cortex cortex-helm/cortex -f cortex-values.yaml +$ helm upgrade --install --version=2.3.0 --namespace cortex cortex cortex-helm/cortex -f cortex-values.yaml ``` #### Setup Prometheus ```sh # Deploy Prometheus to scrape metrics in the cluster and send them, via remote_write, to Cortex. -$ helm install --version=25.20.1 --namespace cortex prometheus prometheus-community/prometheus -f prometheus-values.yaml +$ helm upgrade --install --version=25.20.1 --namespace cortex prometheus prometheus-community/prometheus -f prometheus-values.yaml ``` #### Setup Grafana ```sh # Deploy Grafana to visualize the metrics that were sent to Cortex. -$ helm install --version=7.3.9 --namespace cortex grafana grafana/grafana +$ helm upgrade --install --version=7.3.9 --namespace cortex grafana grafana/grafana -f grafana-values.yaml ``` -#### Configure Grafana - -```sh -# Get your 'admin' user password -kubectl get secret --namespace cortex grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo -``` +#### Explore ```sh # Port-forward to Grafana to visualize -kubectl --namespace cortex port-forward deploy/cortex 3000 +kubectl --namespace cortex port-forward deploy/grafana 3000 ``` -1. Log into the Grafana instance at [http://localhost:3000](http://localhost:3000) -1. Use the username `admin` and the password from the Kubernetes secret -1. Navigate to the [Data Sources](http://localhost:3000/connections/datasources) page -1. Add a new Prometheus Data Source -1. Use `http://cortex-nginx/api/prom` as the URL -1. Click `Save & Test` -1. Go to [Explore](http://localhost:3000/explore) to query metrics -1. Click `Metrics` for a dropdown list of all the available metrics +Grafana is configured to use Cortex as a data source. You can explore the data source in Grafana and query metrics. For example, this [explore](http://localhost:3000/explore?schemaVersion=1&panes=%7B%22au0%22:%7B%22datasource%22:%22P6693426190CB2316%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22expr%22:%22rate%28prometheus_remote_storage_samples_total%5B$__rate_interval%5D%29%22,%22range%22:true,%22instant%22:true,%22datasource%22:%7B%22type%22:%22prometheus%22,%22uid%22:%22P6693426190CB2316%22%7D,%22editorMode%22:%22builder%22,%22legendFormat%22:%22__auto%22,%22useBackend%22:false,%22disableTextWrap%22:false,%22fullMetaSearch%22:false,%22includeNullMetadata%22:false%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D%7D&orgId=1) page is showing the rate of samples being sent to Cortex. + If everything is working correctly, then the metrics seen in Grafana were successfully sent from Prometheus to Cortex via remote_write! +Other things to explore: + +```sh +# Port forward to the ingester to see the administrative interface for Cortex: +$ kubectl --namespace cortex port-forward deploy/cortex-ingester 8080 +``` + +- [Cortex Ingester](http://localhost:8080) + - Try shutting down the [ingester](http://localhost:8080/ingester/shutdown) and see how it affects metric ingestion. 
+ - Restart the ingester pod to bring the ingester back online, and see if Prometheus is affected. + - Does it affect the querying of metrics in Grafana? How many ingesters must be offline before it affects querying? + + +```sh +# Port forward to Prometheus to see the metrics that are being scraped: +$ kubectl --namespace cortex port-forward deploy/prometheus-server 9090 +``` + +- [Prometheus](http://localhost:9090) - Prometheus instance that is sending metrics to Cortex + - Try querying the metrics in Prometheus. + - Are they the same as what you see in Cortex? + +```sh +# Port forward to Grafana to visualize the metrics: +$ kubectl --namespace cortex port-forward deploy/grafana 3000 +``` + +- [Grafana](http://localhost:3000) - Grafana instance that is visualizing the metrics. + - Try creating a new dashboard and adding a new panel with a query to Cortex. + ### Clean up ```sh diff --git a/docs/getting-started/cortex-config.yaml b/docs/getting-started/cortex-config.yaml index f86f1c71d03..d11b626dedc 100644 --- a/docs/getting-started/cortex-config.yaml +++ b/docs/getting-started/cortex-config.yaml @@ -5,7 +5,7 @@ target: all,compactor -# Disable the requirement that every request to Cortex has a +# Disable the requirement that every request to Cortex has an # X-Scope-OrgID header. `fake` will be substituted in instead. auth_enabled: false @@ -49,10 +49,10 @@ ingester: blocks_storage: s3: &s3 endpoint: seaweedfs:8333 - region: baz + region: local bucket_name: cortex-bucket - access_key_id: foo - secret_access_key: bar + access_key_id: any + secret_access_key: any insecure: true tsdb: dir: /data/tsdb diff --git a/docs/getting-started/cortex-values.yaml b/docs/getting-started/cortex-values.yaml index 477f4a8eef8..1be7862b1ef 100644 --- a/docs/getting-started/cortex-values.yaml +++ b/docs/getting-started/cortex-values.yaml @@ -81,10 +81,10 @@ config: blocks_storage: s3: &s3 endpoint: seaweedfs.cortex.svc.cluster.local:8333 - region: baz + region: local bucket_name: cortex-bucket - access_key_id: foo - secret_access_key: bar + access_key_id: any + secret_access_key: any insecure: true tsdb: dir: /data/tsdb diff --git a/docs/getting-started/docker-compose.yaml b/docs/getting-started/docker-compose.yaml index 8af2bdf6948..f78295a4fcc 100644 --- a/docs/getting-started/docker-compose.yaml +++ b/docs/getting-started/docker-compose.yaml @@ -10,8 +10,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-version: "3.8" - services: cortex: image: quay.io/cortexproject/cortex:${CORTEX_VERSION} @@ -20,16 +18,25 @@ services: volumes: - ./cortex-config.yaml:/config/cortex-config.yaml:ro ports: - - 9009:9009 + - "9009:9009" healthcheck: test: wget -qO- http://localhost:9009/ready interval: 10s timeout: 10s retries: 3 + restart: on-failure grafana: image: grafana/grafana:${GRAFANA_VERSION} + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_USERS_DEFAULT_THEME=light + - GF_LOG_MODE=console + - GF_LOG_LEVEL=critical + volumes: + - ./grafana-datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro ports: - - 3000:3000 + - "3000:3000" prometheus: image: prom/prometheus:${PROMETHEUS_VERSION} command: @@ -37,14 +44,17 @@ services: volumes: - ./prometheus-config.yaml:/config/prometheus-config.yaml:ro ports: - - 9090:9090 + - "9090:9090" seaweedfs: image: chrislusf/seaweedfs:${SEAWEEDFS_VERSION} command: - server - -s3 + - -s3.config=/workspace/seaweedfs-config.json ports: - - 8333:8333 + - "8333:8333" + volumes: + - ./seaweedfs-config.json:/workspace/seaweedfs-config.json:ro healthcheck: test: wget -qO- http://localhost:8333/status interval: 10s diff --git a/docs/getting-started/grafana-datasource.yaml b/docs/getting-started/grafana-datasource.yaml new file mode 100644 index 00000000000..62a39c88dbe --- /dev/null +++ b/docs/getting-started/grafana-datasource.yaml @@ -0,0 +1,11 @@ +apiVersion: 1 + +datasources: + - name: Cortex + type: prometheus + access: proxy + orgId: 1 + url: http://cortex:9009/api/prom + version: 1 + editable: true + isDefault: true diff --git a/docs/getting-started/grafana-values.yaml b/docs/getting-started/grafana-values.yaml index 0e6ba7f3614..d612cb27d8d 100644 --- a/docs/getting-started/grafana-values.yaml +++ b/docs/getting-started/grafana-values.yaml @@ -465,7 +465,12 @@ admin: ## 6. uncomment the annotation section in the serviceAccount: above ## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn -env: {} +env: + GF_AUTH_ANONYMOUS_ENABLED: true + GF_AUTH_ANONYMOUS_ORG_ROLE: Admin + GF_USERS_DEFAULT_THEME: light + GF_LOG_MODE: console + GF_LOG_LEVEL: critical ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. 
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core @@ -590,15 +595,16 @@ plugins: [] ## Configure grafana datasources ## ref: http://docs.grafana.org/administration/provisioning/#datasources ## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Cortex + type: prometheus + url: http://cortex-nginx/api/prom + access: proxy + isDefault: true + editable: true # - name: CloudWatch # type: cloudwatch # access: proxy diff --git a/docs/getting-started/prometheus-values.yaml b/docs/getting-started/prometheus-values.yaml index 653cc093bd9..c7ca147ad7f 100644 --- a/docs/getting-started/prometheus-values.yaml +++ b/docs/getting-started/prometheus-values.yaml @@ -238,7 +238,7 @@ server: global: ## How frequently to scrape targets by default ## - scrape_interval: 1m + scrape_interval: 15s ## How long until a scrape request times out ## scrape_timeout: 10s diff --git a/docs/getting-started/seaweedfs-config.json b/docs/getting-started/seaweedfs-config.json new file mode 100644 index 00000000000..75b0e4b9550 --- /dev/null +++ b/docs/getting-started/seaweedfs-config.json @@ -0,0 +1,16 @@ +{ + "identities": [ + { + "name": "cortex", + "credentials": [ + { + "accessKey": "any", + "secretKey": "any" + } + ], + "actions": [ + "Admin" + ] + } + ] +} \ No newline at end of file diff --git a/docs/getting-started/seaweedfs.yaml b/docs/getting-started/seaweedfs.yaml index 7461e213fd8..5ca4f24ddbe 100644 --- a/docs/getting-started/seaweedfs.yaml +++ b/docs/getting-started/seaweedfs.yaml @@ -1,3 +1,27 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: seaweedfs-config +data: + seaweedfs-config.json: |- + { + "identities": [ + { + "name": "cortex", + "credentials": [ + { + "accessKey": "any", + "secretKey": "any" + } + ], + "actions": [ + "Admin" + ] + } + ] + } +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -22,8 +46,16 @@ spec: args: - "server" - "-s3" + - "-s3.config=/workspace/seaweedfs-config.json" ports: - containerPort: 8333 + volumeMounts: + - name: seaweedfs-config + mountPath: /workspace + volumes: + - name: seaweedfs-config + configMap: + name: seaweedfs-config restartPolicy: Always --- apiVersion: v1 diff --git a/go.mod b/go.mod index 98c6838c7c9..cb96429c606 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.70 + github.com/minio/minio-go/v7 v7.0.72 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e @@ -44,24 +44,24 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.54.0 + github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9 // Prometheus maps version 2.x.y to tags v0.x.y. 
- github.com/prometheus/prometheus v0.52.2-0.20240606174736-edd558884b24 + github.com/prometheus/prometheus v0.53.1-0.20240625160125-1abeebacb870 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.9.0 - github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 + github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7 github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf - github.com/thanos-io/thanos v0.35.2-0.20240613160422-651a4a440e8c + github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.13 go.etcd.io/etcd/client/pkg/v3 v3.5.13 go.etcd.io/etcd/client/v3 v3.5.13 - go.opentelemetry.io/contrib/propagators/aws v1.22.0 + go.opentelemetry.io/contrib/propagators/aws v1.27.0 go.opentelemetry.io/otel v1.27.0 - go.opentelemetry.io/otel/bridge/opentracing v1.26.0 + go.opentelemetry.io/otel/bridge/opentracing v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 go.opentelemetry.io/otel/sdk v1.27.0 @@ -83,7 +83,7 @@ require ( github.com/sercand/kuberesolver/v4 v4.0.0 go.opentelemetry.io/collector/pdata v1.8.0 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 ) require ( @@ -94,8 +94,8 @@ require ( cloud.google.com/go/iam v1.1.8 // indirect cloud.google.com/go/storage v1.40.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect @@ -144,7 +144,7 @@ require ( github.com/gogo/googleapis v1.4.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect @@ -153,7 +153,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -166,7 +166,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps 
v0.0.0-20150810152359-62de8c46ede0 // indirect @@ -268,6 +268,3 @@ replace gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0 // Pin kuberesolver/v5 to support new grpc version. Need to upgrade kuberesolver version on weaveworks/common. replace github.com/sercand/kuberesolver/v4 => github.com/sercand/kuberesolver/v5 v5.1.1 - -// Temporarily pinning prometheus common: see https://github.com/thanos-io/thanos/pull/7416#issuecomment-2150585994 -replace github.com/prometheus/common v0.54.0 => github.com/prometheus/common v0.53.0 diff --git a/go.sum b/go.sum index 37b2980ce6e..487105c960a 100644 --- a/go.sum +++ b/go.sum @@ -526,10 +526,10 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -650,6 +650,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= +github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= @@ -719,8 +721,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.2.0 
h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -780,6 +780,8 @@ github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmV github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= +github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= @@ -917,8 +919,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= @@ -1039,8 +1041,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -1052,8 +1054,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 
h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -1128,8 +1130,8 @@ github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2 github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1202,8 +1204,8 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcs github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g= -github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= +github.com/minio/minio-go/v7 v7.0.72 h1:ZSbxs2BfJensLyHdVOgHv+pfmvxYraaUy07ER04dWnA= +github.com/minio/minio-go/v7 v7.0.72/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -1323,8 +1325,8 @@ github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= 
+github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9 h1:WTZ/GBRTImL1HgRTEnJJcM2FuII7PXX1idCIEUJ8/r8= +github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9/go.mod h1:1Yn/UzXoahbVLk1sn6wsGiSiemz3XQejcaz9FIA1r+I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0= @@ -1340,8 +1342,8 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.52.2-0.20240606174736-edd558884b24 h1:h7ScWoH/UHp/Fz6eo/cJva/CbsyQjYtMOtAgIhTUBBE= -github.com/prometheus/prometheus v0.52.2-0.20240606174736-edd558884b24/go.mod h1:RZDkzs+ShMBDkAPQkLEaLBXpjmDcjhNxU2drUVPgKUU= +github.com/prometheus/prometheus v0.53.1-0.20240625160125-1abeebacb870 h1:crWrpxOPZFIZ+M7AniW0Aps4TvRvMdr0HmtNFeEj79I= +github.com/prometheus/prometheus v0.53.1-0.20240625160125-1abeebacb870/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw= github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1418,12 +1420,12 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4 github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 h1:Q0BjHI7FMe5KkKVXBFYto5VNASxiA/+AEhHup/IT7N0= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3/go.mod h1:ptMYNPgbyAR7a2Ab2t7zHA2/0be2ePyawVR7lp7fZtg= +github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7 h1:Mu/VxwijVceWmzyHnpYrDBpdsrX4iREVnnB3wX3f1Pg= +github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7/go.mod h1:OdoR9ITIuwn4ldouL24CC3zg8J+fPMb8gg9fdQKQDYE= github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf h1:R6of9adrCWXhETBstsFzNqrZou5UqeY3fh3k5yv5POY= github.com/thanos-io/promql-engine v0.0.0-20240515161521-93aa311933cf/go.mod h1:FEPnabuTql1bDA4OUM41mwcZOJ20R436k8vq+xtGEG0= -github.com/thanos-io/thanos v0.35.2-0.20240613160422-651a4a440e8c h1:VYcN9Zqklr+jOmjrX8/kQqI1lxZ6uquw0yRq1/fWmMg= -github.com/thanos-io/thanos v0.35.2-0.20240613160422-651a4a440e8c/go.mod h1:5PCP6ts0pKaH/nyARswUe66SKjAEFKPz7oMdDQPMUN8= +github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac h1:Z3Au2M3k6lUZH/9tbkDHlmqGXZb01GrBQ2a8GUZozeE= +github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac/go.mod h1:o9wuuNcTgPmusJuXXNxr39tbC1/solJ4nsZcl3N4VHI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= @@ -1482,8 +1484,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 h1:WZwiLCwOL0XW/6TVT7LTtdRDveoHZ6q3wL+0iYsBcdE= go.opentelemetry.io/contrib/propagators/autoprop v0.38.0/go.mod h1:JBebP2d0HiffbfelbIEoBOCl4790g7Z8lD1scUd3Vd8= -go.opentelemetry.io/contrib/propagators/aws v1.22.0 h1:SKtPYiel5TWrE9gib3F4/BUcrvVjKsA5CH9xnWvj6cQ= -go.opentelemetry.io/contrib/propagators/aws v1.22.0/go.mod h1:zau7d6VqIVtBLLoD+WIufyXK6ZWhSdpiG6tY/hBXu+Y= +go.opentelemetry.io/contrib/propagators/aws v1.27.0 h1:RJexJi4R0S9CpxzuhhzGlTCIpaaK9SJH9g9BFrCWfPE= +go.opentelemetry.io/contrib/propagators/aws v1.27.0/go.mod h1:bqU5Ma1dEQ7VtRbPMUsH8UDTuTMiLJN4W+eUmyNVayc= go.opentelemetry.io/contrib/propagators/b3 v1.13.0 h1:f17PBmZK60RoHvOpJVqEka8oS2EXjpjHquESD/8zZ50= go.opentelemetry.io/contrib/propagators/b3 v1.13.0/go.mod h1:zy2hz1TpGUoJzSwlBchVGvVAFQS8s2pglKLbrAFZ+Sc= go.opentelemetry.io/contrib/propagators/jaeger v1.13.0 h1:+tVlvpiQMOCzi4EYCaBjblibpyKfqoph0fcITmtXMws= @@ -1492,8 +1494,8 @@ go.opentelemetry.io/contrib/propagators/ot v1.13.0 h1:tHWNd0WRS6w9keZoZg9aF3zYoh go.opentelemetry.io/contrib/propagators/ot v1.13.0/go.mod h1:R6Op9T6LxNaMRVlGD0wVwz40LSsAq296CXiEydKLQBU= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI= -go.opentelemetry.io/otel/bridge/opentracing v1.26.0/go.mod h1:HfypvOw/8rqu4lXDhwaxVK1ibBAi1lTMXBHV9rywOCw= +go.opentelemetry.io/otel/bridge/opentracing v1.27.0 h1:3XKC6o2N1PEpaB4rts+EAMJhM1L5V5HYnlnIxeQPk1E= +go.opentelemetry.io/otel/bridge/opentracing v1.27.0/go.mod h1:X8h5bPiFFsgCrj1uKx1K1VvqGhFczYZ0oZKPd0qzSBs= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= @@ -2210,8 +2212,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/integration/e2e/util.go 
b/integration/e2e/util.go index 2175928cea2..ef877020c01 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -268,14 +268,14 @@ func CreateBlock( return id, errors.Wrap(err, "create compactor") } - id, err = c.Write(dir, h, mint, maxt, nil) + ids, err := c.Write(dir, h, mint, maxt, nil) if err != nil { return id, errors.Wrap(err, "write block") } - - if id.Compare(ulid.ULID{}) == 0 { + if len(ids) == 0 { return id, errors.Errorf("nothing to write, asked for %d samples", numSamples) } + id = ids[0] blockDir := filepath.Join(dir, id.String()) logger := log.NewNopLogger() diff --git a/packaging/fpm/Dockerfile b/packaging/fpm/Dockerfile index e8cd0253871..d23747230c7 100644 --- a/packaging/fpm/Dockerfile +++ b/packaging/fpm/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18 +FROM alpine:3.19 RUN apk add --no-cache \ ruby \ diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 726a167bee5..a886a86249b 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1729,19 +1729,14 @@ func (m *tsdbCompactorMock) Plan(dir string) ([]string, error) { return args.Get(0).([]string), args.Error(1) } -func (m *tsdbCompactorMock) Write(dest string, b tsdb.BlockReader, mint, maxt int64, parent *tsdb.BlockMeta) (ulid.ULID, error) { - args := m.Called(dest, b, mint, maxt, parent) - return args.Get(0).(ulid.ULID), args.Error(1) -} - -func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) { +func (m *tsdbCompactorMock) Compact(dest string, dirs []string, open []*tsdb.Block) ([]ulid.ULID, error) { args := m.Called(dest, dirs, open) - return args.Get(0).(ulid.ULID), args.Error(1) + return args.Get(0).([]ulid.ULID), args.Error(1) } -func (m *tsdbCompactorMock) CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) (uid ulid.ULID, err error) { +func (m *tsdbCompactorMock) CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) ([]ulid.ULID, error) { args := m.Called(dest, dirs, open, blockPopulator) - return args.Get(0).(ulid.ULID), args.Error(1) + return args.Get(0).([]ulid.ULID), args.Error(1) } type tsdbPlannerMock struct { diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 07bd89f716d..6de2423d562 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -44,6 +44,15 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe return req } +func (w *WriteRequest) AddHistogramTimeSeries(lbls []labels.Labels, histograms []Histogram) { + for i := 0; i < len(lbls); i++ { + ts := TimeseriesFromPool() + ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...) + ts.Histograms = append(ts.Histograms, histograms[i]) + w.Timeseries = append(w.Timeseries, PreallocTimeseries{TimeSeries: ts}) + } +} + // FromLabelAdaptersToLabels casts []LabelAdapter to labels.Labels. // It uses unsafe, but as LabelAdapter == labels.Label this should be safe. // This allows us to use labels.Labels directly in protos. 
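For context, here is a minimal sketch of how the new `AddHistogramTimeSeries` helper might be called. The label set and the zero-valued histogram are illustrative placeholders, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/cortexpb"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	req := &cortexpb.WriteRequest{}
	// One hypothetical series; a real caller would fill in the histogram fields.
	lbls := []labels.Labels{labels.FromStrings("__name__", "http_request_duration_seconds")}
	hists := []cortexpb.Histogram{{}}
	// Appends one PreallocTimeseries per label set, pairing lbls[i] with hists[i].
	req.AddHistogramTimeSeries(lbls, hists)
	fmt.Println("series in request:", len(req.Timeseries))
}
```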
diff --git a/pkg/cortexpb/timeseries.go b/pkg/cortexpb/timeseries.go index da1eff65dff..db7354ffe45 100644 --- a/pkg/cortexpb/timeseries.go +++ b/pkg/cortexpb/timeseries.go @@ -12,10 +12,11 @@ import ( ) var ( - expectedTimeseries = 100 - expectedLabels = 20 - expectedSamplesPerSeries = 10 - expectedExemplarsPerSeries = 1 + expectedTimeseries = 100 + expectedLabels = 20 + expectedSamplesPerSeries = 10 + expectedExemplarsPerSeries = 1 + expectedHistogramsPerSeries = 1 /* We cannot pool these as pointer-to-slice because the place we use them is in WriteRequest which is generated from Protobuf @@ -31,9 +32,10 @@ var ( timeSeriesPool = sync.Pool{ New: func() interface{} { return &TimeSeries{ - Labels: make([]LabelAdapter, 0, expectedLabels), - Samples: make([]Sample, 0, expectedSamplesPerSeries), - Exemplars: make([]Exemplar, 0, expectedExemplarsPerSeries), + Labels: make([]LabelAdapter, 0, expectedLabels), + Samples: make([]Sample, 0, expectedSamplesPerSeries), + Exemplars: make([]Exemplar, 0, expectedExemplarsPerSeries), + Histograms: make([]Histogram, 0, expectedHistogramsPerSeries), } }, } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index cdd9ed0afc1..139c24fbb0f 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -135,6 +135,7 @@ func TestDistributor_Push(t *testing.T) { lastSeenTimestamp := "cortex_distributor_latest_seen_sample_timestamp_seconds" distributorAppend := "cortex_distributor_ingester_appends_total" distributorAppendFailure := "cortex_distributor_ingester_append_failures_total" + distributorReceivedSamples := "cortex_distributor_received_samples_total" ctx := user.InjectOrgID(context.Background(), "userDistributorPush") type samplesIn struct { @@ -146,6 +147,7 @@ func TestDistributor_Push(t *testing.T) { numIngesters int happyIngesters int samples samplesIn + histogramSamples bool metadata int expectedResponse *cortexpb.WriteResponse expectedError error @@ -276,6 +278,77 @@ func TestDistributor_Push(t *testing.T) { cortex_distributor_ingester_appends_total{ingester="2",type="metadata"} 1 `, }, + "A push to 3 happy ingesters should succeed, histograms": { + numIngesters: 3, + happyIngesters: 3, + samples: samplesIn{num: 5, startTimestampMs: 123456789000}, + histogramSamples: true, + metadata: 5, + expectedResponse: emptyResponse, + metricNames: []string{lastSeenTimestamp, distributorReceivedSamples}, + expectedMetrics: ` + # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. + # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004 + # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. 
+ # TYPE cortex_distributor_received_samples_total counter + cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0 + cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 5 + `, + }, + "A push to 2 happy ingesters should succeed, histograms": { + numIngesters: 3, + happyIngesters: 2, + samples: samplesIn{num: 5, startTimestampMs: 123456789000}, + histogramSamples: true, + metadata: 5, + expectedResponse: emptyResponse, + metricNames: []string{lastSeenTimestamp, distributorReceivedSamples}, + expectedMetrics: ` + # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. + # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.004 + # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. + # TYPE cortex_distributor_received_samples_total counter + cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0 + cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 5 + `, + }, + "A push to 1 happy ingesters should fail, histograms": { + numIngesters: 3, + happyIngesters: 1, + samples: samplesIn{num: 10, startTimestampMs: 123456789000}, + histogramSamples: true, + expectedError: errFail, + metricNames: []string{lastSeenTimestamp, distributorReceivedSamples}, + expectedMetrics: ` + # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. + # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.009 + # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. + # TYPE cortex_distributor_received_samples_total counter + cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0 + cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 10 + `, + }, + "A push exceeding burst size should fail, histograms": { + numIngesters: 3, + happyIngesters: 3, + samples: samplesIn{num: 25, startTimestampMs: 123456789000}, + histogramSamples: true, + metadata: 5, + expectedError: httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (20) exceeded while adding 25 samples and 5 metadata"), + metricNames: []string{lastSeenTimestamp, distributorReceivedSamples}, + expectedMetrics: ` + # HELP cortex_distributor_latest_seen_sample_timestamp_seconds Unix timestamp of latest received sample per user. + # TYPE cortex_distributor_latest_seen_sample_timestamp_seconds gauge + cortex_distributor_latest_seen_sample_timestamp_seconds{user="userDistributorPush"} 123456789.024 + # HELP cortex_distributor_received_samples_total The total number of received samples, excluding rejected and deduped samples. 
+ # TYPE cortex_distributor_received_samples_total counter + cortex_distributor_received_samples_total{type="float",user="userDistributorPush"} 0 + cortex_distributor_received_samples_total{type="histogram",user="userDistributorPush"} 25 + `, + }, } { for _, shardByAllLabels := range []bool{true, false} { tc := tc @@ -297,7 +370,12 @@ func TestDistributor_Push(t *testing.T) { errFail: tc.ingesterError, }) - request := makeWriteRequest(tc.samples.startTimestampMs, tc.samples.num, tc.metadata) + var request *cortexpb.WriteRequest + if !tc.histogramSamples { + request = makeWriteRequest(tc.samples.startTimestampMs, tc.samples.num, tc.metadata, 0) + } else { + request = makeWriteRequest(tc.samples.startTimestampMs, 0, tc.metadata, tc.samples.num) + } response, err := ds[0].Push(ctx, request) assert.Equal(t, tc.expectedResponse, response) assert.Equal(t, status.Code(tc.expectedError), status.Code(err)) @@ -554,37 +632,45 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { for testName, testData := range tests { testData := testData - t.Run(testName, func(t *testing.T) { - t.Parallel() - limits := &validation.Limits{} - flagext.DefaultValues(limits) - limits.IngestionRateStrategy = testData.ingestionRateStrategy - limits.IngestionRate = testData.ingestionRate - limits.IngestionBurstSize = testData.ingestionBurstSize - - // Start all expected distributors - distributors, _, _, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: testData.distributors, - shardByAllLabels: true, - limits: limits, - }) + for _, enableHistogram := range []bool{false, true} { + enableHistogram := enableHistogram + t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { + t.Parallel() + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.IngestionRateStrategy = testData.ingestionRateStrategy + limits.IngestionRate = testData.ingestionRate + limits.IngestionBurstSize = testData.ingestionBurstSize - // Push samples in multiple requests to the first distributor - for _, push := range testData.pushes { - request := makeWriteRequest(0, push.samples, push.metadata) - response, err := distributors[0].Push(ctx, request) + // Start all expected distributors + distributors, _, _, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: testData.distributors, + shardByAllLabels: true, + limits: limits, + }) - if push.expectedError == nil { - assert.Equal(t, emptyResponse, response) - assert.Nil(t, err) - } else { - assert.Nil(t, response) - assert.Equal(t, push.expectedError, err) + // Push samples in multiple requests to the first distributor + for _, push := range testData.pushes { + var request *cortexpb.WriteRequest + if !enableHistogram { + request = makeWriteRequest(0, push.samples, push.metadata, 0) + } else { + request = makeWriteRequest(0, 0, push.metadata, push.samples) + } + response, err := distributors[0].Push(ctx, request) + + if push.expectedError == nil { + assert.Equal(t, emptyResponse, response) + assert.Nil(t, err) + } else { + assert.Nil(t, response) + assert.Equal(t, push.expectedError, err) + } } - } - }) + }) + } } } @@ -620,7 +706,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[2].failResp.Store(httpgrpc.Errorf(429, "Throttling")) for i := 0; i < numberOfWrites; i++ { - request := makeWriteRequest(0, 30, 20) + request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) require.True(t, ok) @@ -633,7 
+719,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[2].failResp.Store(httpgrpc.Errorf(500, "InternalServerError")) for i := 0; i < numberOfWrites; i++ { - request := makeWriteRequest(0, 300, 200) + request := makeWriteRequest(0, 300, 200, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) require.True(t, ok) @@ -646,7 +732,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[2].happy.Store(true) for i := 0; i < numberOfWrites; i++ { - request := makeWriteRequest(0, 30, 20) + request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) status, ok := status.FromError(err) require.True(t, ok) @@ -659,7 +745,7 @@ func TestPush_QuorumError(t *testing.T) { ingesters[2].happy.Store(true) for i := 0; i < 1; i++ { - request := makeWriteRequest(0, 30, 20) + request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.NoError(t, err) } @@ -690,7 +776,7 @@ func TestPush_QuorumError(t *testing.T) { } for i := 0; i < numberOfWrites; i++ { - request := makeWriteRequest(0, 30, 20) + request := makeWriteRequest(0, 30, 20, 10) _, err := d.Push(ctx, request) require.Error(t, err) status, ok := status.FromError(err) @@ -809,45 +895,53 @@ func TestDistributor_PushInstanceLimits(t *testing.T) { for testName, testData := range tests { testData := testData - t.Run(testName, func(t *testing.T) { - t.Parallel() - limits := &validation.Limits{} - flagext.DefaultValues(limits) + for _, enableHistogram := range []bool{true, false} { + enableHistogram := enableHistogram + t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(enableHistogram)), func(t *testing.T) { + t.Parallel() + limits := &validation.Limits{} + flagext.DefaultValues(limits) - // Start all expected distributors - distributors, _, regs, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: 1, - shardByAllLabels: true, - limits: limits, - maxInflightRequests: testData.inflightLimit, - maxIngestionRate: testData.ingestionRateLimit, - }) + // Start all expected distributors + distributors, _, regs, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + shardByAllLabels: true, + limits: limits, + maxInflightRequests: testData.inflightLimit, + maxIngestionRate: testData.ingestionRateLimit, + }) - d := distributors[0] - d.inflightPushRequests.Add(int64(testData.preInflight)) - d.ingestionRate.Add(int64(testData.preRateSamples)) + d := distributors[0] + d.inflightPushRequests.Add(int64(testData.preInflight)) + d.ingestionRate.Add(int64(testData.preRateSamples)) - d.ingestionRate.Tick() + d.ingestionRate.Tick() - for _, push := range testData.pushes { - request := makeWriteRequest(0, push.samples, push.metadata) - _, err := d.Push(ctx, request) + for _, push := range testData.pushes { + var request *cortexpb.WriteRequest + if enableHistogram { + request = makeWriteRequest(0, 0, push.metadata, push.samples) + } else { + request = makeWriteRequest(0, push.samples, push.metadata, 0) + } + _, err := d.Push(ctx, request) - if push.expectedError == nil { - assert.Nil(t, err) - } else { - assert.Equal(t, push.expectedError, err) - } + if push.expectedError == nil { + assert.Nil(t, err) + } else { + assert.Equal(t, push.expectedError, err) + } - d.ingestionRate.Tick() + d.ingestionRate.Tick() - if testData.expectedMetrics != "" { - assert.NoError(t, testutil.GatherAndCompare(regs[0], strings.NewReader(testData.expectedMetrics), testData.metricNames...)) + if testData.expectedMetrics != "" { + assert.NoError(t, 
testutil.GatherAndCompare(regs[0], strings.NewReader(testData.expectedMetrics), testData.metricNames...)) + } } - } - }) + }) + } } } @@ -904,40 +998,43 @@ func TestDistributor_PushHAInstances(t *testing.T) { for _, shardByAllLabels := range []bool{true, false} { tc := tc shardByAllLabels := shardByAllLabels - t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v)", i, shardByAllLabels), func(t *testing.T) { - t.Parallel() - var limits validation.Limits - flagext.DefaultValues(&limits) - limits.AcceptHASamples = true - limits.MaxLabelValueLength = 15 - - ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: 1, - shardByAllLabels: shardByAllLabels, - limits: &limits, - enableTracker: tc.enableTracker, - }) + for _, enableHistogram := range []bool{true, false} { + enableHistogram := enableHistogram + t.Run(fmt.Sprintf("[%d](shardByAllLabels=%v, histogram=%v)", i, shardByAllLabels, enableHistogram), func(t *testing.T) { + t.Parallel() + var limits validation.Limits + flagext.DefaultValues(&limits) + limits.AcceptHASamples = true + limits.MaxLabelValueLength = 15 + + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + shardByAllLabels: shardByAllLabels, + limits: &limits, + enableTracker: tc.enableTracker, + }) - d := ds[0] + d := ds[0] - userID, err := tenant.TenantID(ctx) - assert.NoError(t, err) - err = d.HATracker.CheckReplica(ctx, userID, tc.cluster, tc.acceptedReplica, time.Now()) - assert.NoError(t, err) + userID, err := tenant.TenantID(ctx) + assert.NoError(t, err) + err = d.HATracker.CheckReplica(ctx, userID, tc.cluster, tc.acceptedReplica, time.Now()) + assert.NoError(t, err) - request := makeWriteRequestHA(tc.samples, tc.testReplica, tc.cluster) - response, err := d.Push(ctx, request) - assert.Equal(t, tc.expectedResponse, response) + request := makeWriteRequestHA(tc.samples, tc.testReplica, tc.cluster, enableHistogram) + response, err := d.Push(ctx, request) + assert.Equal(t, tc.expectedResponse, response) - httpResp, ok := httpgrpc.HTTPResponseFromError(err) - if ok { - assert.Equal(t, tc.expectedCode, httpResp.Code) - } else if tc.expectedCode != 0 { - assert.Fail(t, "expected HTTP status code", tc.expectedCode) - } - }) + httpResp, ok := httpgrpc.HTTPResponseFromError(err) + if ok { + assert.Equal(t, tc.expectedCode, httpResp.Code) + } else if tc.expectedCode != 0 { + assert.Fail(t, "expected HTTP status code", tc.expectedCode) + } + }) + } } } } @@ -1087,7 +1184,7 @@ func TestDistributor_PushQuery(t *testing.T) { shuffleShardSize: shuffleShardSize, }) - request := makeWriteRequest(0, tc.samples, tc.metadata) + request := makeWriteRequest(0, tc.samples, tc.metadata, 0) writeResponse, err := ds[0].Push(ctx, request) assert.Equal(t, &cortexpb.WriteResponse{}, writeResponse) assert.Nil(t, err) @@ -1119,250 +1216,277 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac t.Parallel() const maxChunksLimit = 30 // Chunks are duplicated due to replication factor. - ctx := user.InjectOrgID(context.Background(), "user") - limits := &validation.Limits{} - flagext.DefaultValues(limits) - limits.MaxChunksPerQuery = maxChunksLimit + for _, histogram := range []bool{true, false} { + ctx := user.InjectOrgID(context.Background(), "user") + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.MaxChunksPerQuery = maxChunksLimit - // Prepare distributors. 
- ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: 1, - shardByAllLabels: true, - limits: limits, - }) + // Prepare distributors. + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + shardByAllLabels: true, + limits: limits, + }) - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, maxChunksLimit, 0)) + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, maxChunksLimit, 0)) - // Push a number of series below the max chunks limit. Each series has 1 sample, - // so expect 1 chunk per series when querying back. - initialSeries := maxChunksLimit / 3 - writeReq := makeWriteRequest(0, initialSeries, 0) - writeRes, err := ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + // Push a number of series below the max chunks limit. Each series has 1 sample, + // so expect 1 chunk per series when querying back. + initialSeries := maxChunksLimit / 3 + var writeReq *cortexpb.WriteRequest + if histogram { + writeReq = makeWriteRequest(0, 0, 0, initialSeries) + } else { + writeReq = makeWriteRequest(0, initialSeries, 0, 0) + } + writeRes, err := ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - allSeriesMatchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), - } + allSeriesMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), + } - // Since the number of series (and thus chunks) is equal to the limit (but doesn't - // exceed it), we expect a query running on all series to succeed. - queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) - assert.Len(t, queryRes.Chunkseries, initialSeries) + // Since the number of series (and thus chunks) is equal to the limit (but doesn't + // exceed it), we expect a query running on all series to succeed. + queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.NoError(t, err) + assert.Len(t, queryRes.Chunkseries, initialSeries) - // Push more series to exceed the limit once we'll query back all series. - writeReq = &cortexpb.WriteRequest{} - for i := 0; i < maxChunksLimit; i++ { - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0), - ) - } + // Push more series to exceed the limit once we'll query back all series. + writeReq = &cortexpb.WriteRequest{} + for i := 0; i < maxChunksLimit; i++ { + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram), + ) + } - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the number of series (and thus chunks) is exceeding to the limit, we expect - // a query running on all series to fail. - _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) 
- require.Error(t, err) - assert.Contains(t, err.Error(), "the query hit the max number of chunks limit") + // Since the number of series (and thus chunks) is exceeding to the limit, we expect + // a query running on all series to fail. + _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.Error(t, err) + assert.Contains(t, err.Error(), "the query hit the max number of chunks limit") + } } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached(t *testing.T) { t.Parallel() const maxSeriesLimit = 10 - ctx := user.InjectOrgID(context.Background(), "user") - limits := &validation.Limits{} - flagext.DefaultValues(limits) - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0, 0, 0)) + for _, histogram := range []bool{true, false} { + ctx := user.InjectOrgID(context.Background(), "user") + limits := &validation.Limits{} + flagext.DefaultValues(limits) + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0, 0, 0)) - // Prepare distributors. - ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: 1, - shardByAllLabels: true, - limits: limits, - }) + // Prepare distributors. + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + shardByAllLabels: true, + limits: limits, + }) - // Push a number of series below the max series limit. - initialSeries := maxSeriesLimit - writeReq := makeWriteRequest(0, initialSeries, 0) - writeRes, err := ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + // Push a number of series below the max series limit. + initialSeries := maxSeriesLimit + var writeReq *cortexpb.WriteRequest + if histogram { + writeReq = makeWriteRequest(0, 0, 0, initialSeries) + } else { + writeReq = makeWriteRequest(0, initialSeries, 0, 0) + } - allSeriesMatchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), - } + writeRes, err := ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the number of series is equal to the limit (but doesn't - // exceed it), we expect a query running on all series to succeed. - queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) - assert.Len(t, queryRes.Chunkseries, initialSeries) + allSeriesMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), + } - // Push more series to exceed the limit once we'll query back all series. - writeReq = &cortexpb.WriteRequest{} - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0), - ) + // Since the number of series is equal to the limit (but doesn't + // exceed it), we expect a query running on all series to succeed. + queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.NoError(t, err) + assert.Len(t, queryRes.Chunkseries, initialSeries) - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + // Push more series to exceed the limit once we'll query back all series. 
+ writeReq = &cortexpb.WriteRequest{} + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram), + ) - // Since the number of series is exceeding the limit, we expect - // a query running on all series to fail. - _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.Error(t, err) - assert.Contains(t, err.Error(), "max number of series limit") + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) + // Since the number of series is exceeding the limit, we expect + // a query running on all series to fail. + _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.Error(t, err) + assert.Contains(t, err.Error(), "max number of series limit") + } } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIsReached(t *testing.T) { t.Parallel() const seriesToAdd = 10 - ctx := user.InjectOrgID(context.Background(), "user") - limits := &validation.Limits{} - flagext.DefaultValues(limits) - - // Prepare distributors. - // Use replication factor of 2 to always read all the chunks from both ingesters, - // this guarantees us to always read the same chunks and have a stable test. - ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 2, - happyIngesters: 2, - numDistributors: 1, - shardByAllLabels: true, - limits: limits, - replicationFactor: 2, - }) + for _, histogram := range []bool{true, false} { + ctx := user.InjectOrgID(context.Background(), "user") + limits := &validation.Limits{} + flagext.DefaultValues(limits) + + // Prepare distributors. + // Use replication factor of 2 to always read all the chunks from both ingesters, + // this guarantees us to always read the same chunks and have a stable test. + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 2, + happyIngesters: 2, + numDistributors: 1, + shardByAllLabels: true, + limits: limits, + replicationFactor: 2, + }) - allSeriesMatchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), - } - // Push a single series to allow us to calculate the chunk size to calculate the limit for the test. - writeReq := &cortexpb.WriteRequest{} - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0), - ) - writeRes, err := ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) - chunkSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) + allSeriesMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), + } + // Push a single series to allow us to calculate the chunk size to calculate the limit for the test. + writeReq := &cortexpb.WriteRequest{} + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram), + ) + writeRes, err := ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) + chunkSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) 
+ require.NoError(t, err) - // Use the resulting chunks size to calculate the limit as (series to add + our test series) * the response chunk size. - var responseChunkSize = chunkSizeResponse.ChunksSize() - var maxBytesLimit = (seriesToAdd) * responseChunkSize + // Use the resulting chunks size to calculate the limit as (series to add + our test series) * the response chunk size. + var responseChunkSize = chunkSizeResponse.ChunksSize() + var maxBytesLimit = (seriesToAdd) * responseChunkSize - // Update the limiter with the calculated limits. - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0, 0)) + // Update the limiter with the calculated limits. + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0, 0)) - // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. - writeReq = makeWriteRequest(0, seriesToAdd-1, 0) - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. + if histogram { + writeReq = makeWriteRequest(0, 0, 0, seriesToAdd-1) + } else { + writeReq = makeWriteRequest(0, seriesToAdd-1, 0, 0) + } + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the number of chunk bytes is equal to the limit (but doesn't - // exceed it), we expect a query running on all series to succeed. - queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) - assert.Len(t, queryRes.Chunkseries, seriesToAdd) + // Since the number of chunk bytes is equal to the limit (but doesn't + // exceed it), we expect a query running on all series to succeed. + queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.NoError(t, err) + assert.Len(t, queryRes.Chunkseries, seriesToAdd) - // Push another series to exceed the chunk bytes limit once we'll query back all series. - writeReq = &cortexpb.WriteRequest{} - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0), - ) + // Push another series to exceed the chunk bytes limit once we'll query back all series. + writeReq = &cortexpb.WriteRequest{} + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram), + ) - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the aggregated chunk size is exceeding the limit, we expect - // a query running on all series to fail. - _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.Error(t, err) - assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, maxBytesLimit))) + // Since the aggregated chunk size is exceeding the limit, we expect + // a query running on all series to fail. + _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) 
+ require.Error(t, err) + assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, maxBytesLimit))) + } } func TestDistributor_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimitIsReached(t *testing.T) { t.Parallel() const seriesToAdd = 10 - ctx := user.InjectOrgID(context.Background(), "user") - limits := &validation.Limits{} - flagext.DefaultValues(limits) + for _, histogram := range []bool{true, false} { + ctx := user.InjectOrgID(context.Background(), "user") + limits := &validation.Limits{} + flagext.DefaultValues(limits) + + // Prepare distributors. + // Use replication factor of 2 to always read all the chunks from both ingesters, + // this guarantees us to always read the same chunks and have a stable test. + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 2, + happyIngesters: 2, + numDistributors: 1, + shardByAllLabels: true, + limits: limits, + replicationFactor: 2, + }) - // Prepare distributors. - // Use replication factor of 2 to always read all the chunks from both ingesters, - // this guarantees us to always read the same chunks and have a stable test. - ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 2, - happyIngesters: 2, - numDistributors: 1, - shardByAllLabels: true, - limits: limits, - replicationFactor: 2, - }) + allSeriesMatchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), + } + // Push a single series to allow us to calculate the label size to calculate the limit for the test. + writeReq := &cortexpb.WriteRequest{} + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram), + ) - allSeriesMatchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), - } - // Push a single series to allow us to calculate the label size to calculate the limit for the test. - writeReq := &cortexpb.WriteRequest{} - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0), - ) - writeRes, err := ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) - dataSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) + writeRes, err := ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) + dataSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.NoError(t, err) - // Use the resulting chunks size to calculate the limit as (series to add + our test series) * the response chunk size. - var dataSize = dataSizeResponse.Size() - var maxBytesLimit = (seriesToAdd) * dataSize * 2 // Multiplying by RF because the limit is applied before de-duping. + // Use the resulting chunks size to calculate the limit as (series to add + our test series) * the response chunk size. + var dataSize = dataSizeResponse.Size() + var maxBytesLimit = (seriesToAdd) * dataSize * 2 // Multiplying by RF because the limit is applied before de-duping. - // Update the limiter with the calculated limits. - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit)) + // Update the limiter with the calculated limits. 
+ ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit)) - // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. - writeReq = makeWriteRequest(0, seriesToAdd-1, 0) - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. + if histogram { + writeReq = makeWriteRequest(0, 0, 0, seriesToAdd-1) + } else { + writeReq = makeWriteRequest(0, seriesToAdd-1, 0, 0) + } + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the number of chunk bytes is equal to the limit (but doesn't - // exceed it), we expect a query running on all series to succeed. - queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.NoError(t, err) - assert.Len(t, queryRes.Chunkseries, seriesToAdd) + // Since the number of chunk bytes is equal to the limit (but doesn't + // exceed it), we expect a query running on all series to succeed. + queryRes, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) + require.NoError(t, err) + assert.Len(t, queryRes.Chunkseries, seriesToAdd) - // Push another series to exceed the chunk bytes limit once we'll query back all series. - writeReq = &cortexpb.WriteRequest{} - writeReq.Timeseries = append(writeReq.Timeseries, - makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0), - ) + // Push another series to exceed the chunk bytes limit once we'll query back all series. + writeReq = &cortexpb.WriteRequest{} + writeReq.Timeseries = append(writeReq.Timeseries, + makeWriteRequestTimeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram), + ) - writeRes, err = ds[0].Push(ctx, writeReq) - assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) - assert.Nil(t, err) + writeRes, err = ds[0].Push(ctx, writeReq) + assert.Equal(t, &cortexpb.WriteResponse{}, writeRes) + assert.Nil(t, err) - // Since the aggregated chunk size is exceeding the limit, we expect - // a query running on all series to fail. - _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) - require.Error(t, err) - assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, maxBytesLimit))) + // Since the aggregated chunk size is exceeding the limit, we expect + // a query running on all series to fail. + _, err = ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) 
+ require.Error(t, err) + assert.Equal(t, err, validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, maxBytesLimit))) + } } func TestDistributor_Push_LabelRemoval(t *testing.T) { @@ -1423,32 +1547,34 @@ func TestDistributor_Push_LabelRemoval(t *testing.T) { } for _, tc := range cases { - var err error - var limits validation.Limits - flagext.DefaultValues(&limits) - limits.DropLabels = tc.removeLabels - limits.AcceptHASamples = tc.removeReplica - - ds, ingesters, _, _ := prepare(t, prepConfig{ - numIngesters: 2, - happyIngesters: 2, - numDistributors: 1, - shardByAllLabels: true, - limits: &limits, - }) + for _, histogram := range []bool{true, false} { + var err error + var limits validation.Limits + flagext.DefaultValues(&limits) + limits.DropLabels = tc.removeLabels + limits.AcceptHASamples = tc.removeReplica - // Push the series to the distributor - req := mockWriteRequest([]labels.Labels{tc.inputSeries}, 1, 1) - _, err = ds[0].Push(ctx, req) - require.NoError(t, err) + ds, ingesters, _, _ := prepare(t, prepConfig{ + numIngesters: 2, + happyIngesters: 2, + numDistributors: 1, + shardByAllLabels: true, + limits: &limits, + }) + + // Push the series to the distributor + req := mockWriteRequest([]labels.Labels{tc.inputSeries}, 1, 1, histogram) + _, err = ds[0].Push(ctx, req) + require.NoError(t, err) - // Since each test pushes only 1 series, we do expect the ingester - // to have received exactly 1 series - for i := range ingesters { - timeseries := ingesters[i].series() - assert.Equal(t, 1, len(timeseries)) - for _, v := range timeseries { - assert.Equal(t, tc.expectedSeries, cortexpb.FromLabelAdaptersToLabels(v.Labels)) + // Since each test pushes only 1 series, we do expect the ingester + // to have received exactly 1 series + for i := range ingesters { + timeseries := ingesters[i].series() + assert.Equal(t, 1, len(timeseries)) + for _, v := range timeseries { + assert.Equal(t, tc.expectedSeries, cortexpb.FromLabelAdaptersToLabels(v.Labels)) + } } } } @@ -1490,7 +1616,7 @@ func TestDistributor_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) }) // Push the series to the distributor - req := mockWriteRequest([]labels.Labels{tc.inputSeries}, 1, 1) + req := mockWriteRequest([]labels.Labels{tc.inputSeries}, 1, 1, false) _, err = ds[0].Push(ctx, req) require.Error(t, err) assert.Equal(t, "rpc error: code = Code(400) desc = sample missing metric name", err.Error()) @@ -1587,7 +1713,7 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * }) // Push the series to the distributor - req := mockWriteRequest([]labels.Labels{testData.inputSeries}, 1, 1) + req := mockWriteRequest([]labels.Labels{testData.inputSeries}, 1, 1, false) _, err := ds[0].Push(ctx, req) require.NoError(t, err) @@ -1639,25 +1765,28 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) { for testName, tc := range tests { tc := tc - t.Run(testName, func(t *testing.T) { - t.Parallel() - ds, _, _, _ := prepare(t, prepConfig{ - numIngesters: 2, - happyIngesters: 2, - numDistributors: 1, - shuffleShardSize: 1, - skipLabelNameValidation: tc.skipLabelNameValidationCfg, + for _, histogram := range []bool{true, false} { + histogram := histogram + t.Run(fmt.Sprintf("%s, histogram=%s", testName, strconv.FormatBool(histogram)), func(t *testing.T) { + t.Parallel() + ds, _, _, _ := prepare(t, prepConfig{ + numIngesters: 2, + happyIngesters: 2, + numDistributors: 1, + shuffleShardSize: 1, + skipLabelNameValidation: tc.skipLabelNameValidationCfg, + }) + req := 
mockWriteRequest([]labels.Labels{tc.inputLabels}, 42, 100000, histogram) + req.SkipLabelNameValidation = tc.skipLabelNameValidationReq + _, err := ds[0].Push(ctx, req) + if tc.errExpected { + fromError, _ := status.FromError(err) + assert.Equal(t, tc.errMessage, fromError.Message()) + } else { + assert.Nil(t, err) + } }) - req := mockWriteRequest([]labels.Labels{tc.inputLabels}, 42, 100000) - req.SkipLabelNameValidation = tc.skipLabelNameValidationReq - _, err := ds[0].Push(ctx, req) - if tc.errExpected { - fromError, _ := status.FromError(err) - assert.Equal(t, tc.errMessage, fromError.Message()) - } else { - assert.Nil(t, err) - } - }) + } } } @@ -2117,32 +2246,34 @@ func TestSlowQueries(t *testing.T) { func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { t.Parallel() - // Create distributor - ds, ing, _, _ := prepare(t, prepConfig{ - numIngesters: 3, - happyIngesters: 3, - numDistributors: 1, - shardByAllLabels: true, - shuffleShardEnabled: true, - shuffleShardSize: 3, - replicationFactor: 3, - }) + for _, histogram := range []bool{true, false} { + // Create distributor + ds, ing, _, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + shardByAllLabels: true, + shuffleShardEnabled: true, + shuffleShardSize: 3, + replicationFactor: 3, + }) - ing[2].queryDelay = 50 * time.Millisecond + ing[2].queryDelay = 50 * time.Millisecond - ctx := user.InjectOrgID(context.Background(), "test") + ctx := user.InjectOrgID(context.Background(), "test") - now := model.Now() + now := model.Now() - for i := 0; i < 100; i++ { - req := mockWriteRequest([]labels.Labels{{{Name: labels.MetricName, Value: "test"}, {Name: "app", Value: "m"}, {Name: "uniq8", Value: strconv.Itoa(i)}}}, 1, now.Unix()) - _, err := ds[0].Push(ctx, req) - require.NoError(t, err) - } + for i := 0; i < 100; i++ { + req := mockWriteRequest([]labels.Labels{{{Name: labels.MetricName, Value: "test"}, {Name: "app", Value: "m"}, {Name: "uniq8", Value: strconv.Itoa(i)}}}, 1, now.Unix(), histogram) + _, err := ds[0].Push(ctx, req) + require.NoError(t, err) + } - for i := 0; i < 50; i++ { - _, err := ds[0].MetricsForLabelMatchers(ctx, now, now, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test")) - require.NoError(t, err) + for i := 0; i < 50; i++ { + _, err := ds[0].MetricsForLabelMatchers(ctx, now, now, mustNewMatcher(labels.MatchEqual, model.MetricNameLabel, "test")) + require.NoError(t, err) + } } } @@ -2152,7 +2283,7 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { fixtures := []struct { lbls labels.Labels - value float64 + value int64 timestamp int64 }{ {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000}, @@ -2282,61 +2413,64 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { for testName, testData := range tests { testData := testData - t.Run(testName, func(t *testing.T) { - t.Parallel() - now := model.Now() - - // Create distributor - ds, ingesters, _, _ := prepare(t, prepConfig{ - numIngesters: numIngesters, - happyIngesters: numIngesters, - numDistributors: 1, - shardByAllLabels: true, - shuffleShardEnabled: testData.shuffleShardEnabled, - shuffleShardSize: testData.shuffleShardSize, - }) - - // Push fixtures - ctx := user.InjectOrgID(context.Background(), "test") - ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter) + for _, histogram := range []bool{true, false} { + histogram := histogram + t.Run(fmt.Sprintf("%s, histogram=%s", testName, 
strconv.FormatBool(histogram)), func(t *testing.T) { + t.Parallel() + now := model.Now() - for _, series := range fixtures { - req := mockWriteRequest([]labels.Labels{series.lbls}, series.value, series.timestamp) - _, err := ds[0].Push(ctx, req) - require.NoError(t, err) - } + // Create distributor + ds, ingesters, _, _ := prepare(t, prepConfig{ + numIngesters: numIngesters, + happyIngesters: numIngesters, + numDistributors: 1, + shardByAllLabels: true, + shuffleShardEnabled: testData.shuffleShardEnabled, + shuffleShardSize: testData.shuffleShardSize, + }) - { - metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, testData.matchers...) + // Push fixtures + ctx := user.InjectOrgID(context.Background(), "test") + ctx = limiter.AddQueryLimiterToContext(ctx, testData.queryLimiter) - if testData.expectedErr != nil { - assert.ErrorIs(t, err, testData.expectedErr) - return + for _, series := range fixtures { + req := mockWriteRequest([]labels.Labels{series.lbls}, series.value, series.timestamp, histogram) + _, err := ds[0].Push(ctx, req) + require.NoError(t, err) } - require.NoError(t, err) - assert.ElementsMatch(t, testData.expectedResult, metrics) + { + metrics, err := ds[0].MetricsForLabelMatchers(ctx, now, now, testData.matchers...) - // Check how many ingesters have been queried. - // Due to the quorum the distributor could cancel the last request towards ingesters - // if all other ones are successful, so we're good either has been queried X or X-1 - // ingesters. - assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchers")) - } + if testData.expectedErr != nil { + assert.ErrorIs(t, err, testData.expectedErr) + return + } - { - metrics, err := ds[0].MetricsForLabelMatchersStream(ctx, now, now, testData.matchers...) - if testData.expectedErr != nil { - assert.ErrorIs(t, err, testData.expectedErr) - return + require.NoError(t, err) + assert.ElementsMatch(t, testData.expectedResult, metrics) + + // Check how many ingesters have been queried. + // Due to the quorum the distributor could cancel the last request towards ingesters + // if all other ones are successful, so we're good either has been queried X or X-1 + // ingesters. + assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchers")) } - require.NoError(t, err) - assert.ElementsMatch(t, testData.expectedResult, metrics) + { + metrics, err := ds[0].MetricsForLabelMatchersStream(ctx, now, now, testData.matchers...) 
+ if testData.expectedErr != nil { + assert.ErrorIs(t, err, testData.expectedErr) + return + } - assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchersStream")) - } - }) + require.NoError(t, err) + assert.ElementsMatch(t, testData.expectedResult, metrics) + + assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsForLabelMatchersStream")) + } + }) + } } } @@ -2474,7 +2608,7 @@ func TestDistributor_MetricsMetadata(t *testing.T) { // Push metadata ctx := user.InjectOrgID(context.Background(), "test") - req := makeWriteRequest(0, 0, 10) + req := makeWriteRequest(0, 0, 10, 0) _, err := ds[0].Push(ctx, req) require.NoError(t, err) @@ -2501,32 +2635,27 @@ func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher { return m } -func mockWriteRequest(lbls []labels.Labels, value float64, timestampMs int64) *cortexpb.WriteRequest { - samples := make([]cortexpb.Sample, len(lbls)) - for i := range lbls { - samples[i] = cortexpb.Sample{ - TimestampMs: timestampMs, - Value: value, +func mockWriteRequest(lbls []labels.Labels, value int64, timestampMs int64, histogram bool) *cortexpb.WriteRequest { + var ( + samples []cortexpb.Sample + histograms []cortexpb.Histogram + ) + if histogram { + histograms = make([]cortexpb.Histogram, len(lbls)) + for i := range lbls { + histograms[i] = cortexpb.HistogramToHistogramProto(timestampMs, histogram_util.GenerateTestHistogram(int(value))) } - } - - return cortexpb.ToWriteRequest(lbls, samples, nil, nil, cortexpb.API) -} - -// nolint:unused -func mockHistogramWriteRequest(lbls []labels.Labels, value int, timestampMs int64, float bool) *cortexpb.WriteRequest { - histograms := make([]cortexpb.Histogram, len(lbls)) - for i := range lbls { - if float { - fh := histogram_util.GenerateTestFloatHistogram(value) - histograms[i] = cortexpb.FloatHistogramToHistogramProto(timestampMs, fh) - continue + } else { + samples = make([]cortexpb.Sample, len(lbls)) + for i := range lbls { + samples[i] = cortexpb.Sample{ + TimestampMs: timestampMs, + Value: float64(value), + } } - h := histogram_util.GenerateTestHistogram(value) - histograms[i] = cortexpb.HistogramToHistogramProto(timestampMs, h) } - return cortexpb.ToWriteRequest(lbls, nil, nil, histograms, cortexpb.API) + return cortexpb.ToWriteRequest(lbls, samples, nil, histograms, cortexpb.API) } type prepConfig struct { @@ -2714,7 +2843,7 @@ func stopAll(ds []*Distributor, r *ring.Ring) { r.StopAsync() } -func makeWriteRequest(startTimestampMs int64, samples int, metadata int) *cortexpb.WriteRequest { +func makeWriteRequest(startTimestampMs int64, samples int, metadata int, histograms int) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} for i := 0; i < samples; i++ { request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( @@ -2722,7 +2851,16 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int) *cortex {Name: model.MetricNameLabel, Value: "foo"}, {Name: "bar", Value: "baz"}, {Name: "sample", Value: fmt.Sprintf("%d", i)}, - }, startTimestampMs+int64(i), float64(i))) + }, startTimestampMs+int64(i), i, false)) + } + + for i := 0; i < histograms; i++ { + request.Timeseries = append(request.Timeseries, makeWriteRequestTimeseries( + []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "bar", Value: "baz"}, + {Name: "histogram", Value: fmt.Sprintf("%d", i)}, + }, 
startTimestampMs+int64(i), i, true)) } for i := 0; i < metadata; i++ { @@ -2737,21 +2875,24 @@ func makeWriteRequest(startTimestampMs int64, samples int, metadata int) *cortex return request } -func makeWriteRequestTimeseries(labels []cortexpb.LabelAdapter, ts int64, value float64) cortexpb.PreallocTimeseries { - return cortexpb.PreallocTimeseries{ +func makeWriteRequestTimeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool) cortexpb.PreallocTimeseries { + t := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ Labels: labels, - Samples: []cortexpb.Sample{ - { - Value: value, - TimestampMs: ts, - }, - }, }, } + if histogram { + t.Histograms = append(t.Histograms, cortexpb.HistogramToHistogramProto(ts, histogram_util.GenerateTestHistogram(value))) + } else { + t.Samples = append(t.Samples, cortexpb.Sample{ + TimestampMs: ts, + Value: float64(value), + }) + } + return t } -func makeWriteRequestHA(samples int, replica, cluster string) *cortexpb.WriteRequest { +func makeWriteRequestHA(samples int, replica, cluster string, histogram bool) *cortexpb.WriteRequest { request := &cortexpb.WriteRequest{} for i := 0; i < samples; i++ { ts := cortexpb.PreallocTimeseries{ @@ -2765,11 +2906,17 @@ func makeWriteRequestHA(samples int, replica, cluster string) *cortexpb.WriteReq }, }, } - ts.Samples = []cortexpb.Sample{ - { - Value: float64(i), - TimestampMs: int64(i), - }, + if histogram { + ts.Histograms = []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(int64(i), histogram_util.GenerateTestHistogram(i)), + } + } else { + ts.Samples = []cortexpb.Sample{ + { + Value: float64(i), + TimestampMs: int64(i), + }, + } } request.Timeseries = append(request.Timeseries, ts) } @@ -3207,12 +3354,15 @@ func TestDistributorValidation(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "1") now := model.Now() future, past := now.Add(5*time.Hour), now.Add(-25*time.Hour) + testHistogram := histogram_util.GenerateTestHistogram(1) + testFloatHistogram := histogram_util.GenerateTestFloatHistogram(1) for i, tc := range []struct { - metadata []*cortexpb.MetricMetadata - labels []labels.Labels - samples []cortexpb.Sample - err error + metadata []*cortexpb.MetricMetadata + labels []labels.Labels + samples []cortexpb.Sample + histograms []cortexpb.Histogram + err error }{ // Test validation passes. { @@ -3222,6 +3372,9 @@ func TestDistributorValidation(t *testing.T) { TimestampMs: int64(now), Value: 1, }}, + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(int64(now), testHistogram), + }, }, // Test validation fails for very old samples. { @@ -3273,6 +3426,30 @@ func TestDistributorValidation(t *testing.T) { }}, err: httpgrpc.Errorf(http.StatusBadRequest, `metadata missing metric name`), }, + // Test maximum labels names per series for histogram samples. + { + labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}}, + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(int64(now), testHistogram), + }, + err: httpgrpc.Errorf(http.StatusBadRequest, `series has too many labels (actual: 3, limit: 2) series: 'testmetric{foo2="bar2", foo="bar"}'`), + }, + // Test validation fails for very old histogram samples. 
+ { + labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(int64(past), testHistogram), + }, + err: httpgrpc.Errorf(http.StatusBadRequest, `timestamp too old: %d metric: "testmetric"`, past), + }, + // Test validation fails for histogram samples from the future. + { + labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + histograms: []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(int64(future), testFloatHistogram), + }, + err: httpgrpc.Errorf(http.StatusBadRequest, `timestamp too new: %d metric: "testmetric"`, future), + }, } { tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { @@ -3293,7 +3470,7 @@ func TestDistributorValidation(t *testing.T) { limits: &limits, }) - _, err := ds[0].Push(ctx, cortexpb.ToWriteRequest(tc.labels, tc.samples, tc.metadata, nil, cortexpb.API)) + _, err := ds[0].Push(ctx, cortexpb.ToWriteRequest(tc.labels, tc.samples, tc.metadata, tc.histograms, cortexpb.API)) require.Equal(t, tc.err, err) }) } @@ -3465,36 +3642,39 @@ func TestDistributor_Push_Relabel(t *testing.T) { for _, tc := range cases { tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - var err error - var limits validation.Limits - flagext.DefaultValues(&limits) - limits.MetricRelabelConfigs = tc.metricRelabelConfigs + for _, enableHistogram := range []bool{false, true} { + enableHistogram := enableHistogram + t.Run(fmt.Sprintf("%s, histogram=%s", tc.name, strconv.FormatBool(enableHistogram)), func(t *testing.T) { + t.Parallel() + var err error + var limits validation.Limits + flagext.DefaultValues(&limits) + limits.MetricRelabelConfigs = tc.metricRelabelConfigs - ds, ingesters, _, _ := prepare(t, prepConfig{ - numIngesters: 2, - happyIngesters: 2, - numDistributors: 1, - shardByAllLabels: true, - limits: &limits, - }) + ds, ingesters, _, _ := prepare(t, prepConfig{ + numIngesters: 2, + happyIngesters: 2, + numDistributors: 1, + shardByAllLabels: true, + limits: &limits, + }) - // Push the series to the distributor - req := mockWriteRequest(tc.inputSeries, 1, 1) - _, err = ds[0].Push(ctx, req) - require.NoError(t, err) + // Push the series to the distributor + req := mockWriteRequest(tc.inputSeries, 1, 1, enableHistogram) + _, err = ds[0].Push(ctx, req) + require.NoError(t, err) - // Since each test pushes only 1 series, we do expect the ingester - // to have received exactly 1 series - for i := range ingesters { - timeseries := ingesters[i].series() - assert.Equal(t, 1, len(timeseries)) - for _, v := range timeseries { - assert.Equal(t, tc.expectedSeries, cortexpb.FromLabelAdaptersToLabels(v.Labels)) + // Since each test pushes only 1 series, we do expect the ingester + // to have received exactly 1 series + for i := range ingesters { + timeseries := ingesters[i].series() + assert.Equal(t, 1, len(timeseries)) + for _, v := range timeseries { + assert.Equal(t, tc.expectedSeries, cortexpb.FromLabelAdaptersToLabels(v.Labels)) + } } - } - }) + }) + } } } @@ -3560,7 +3740,7 @@ func TestDistributor_Push_EmptyLabel(t *testing.T) { }) // Push the series to the distributor - req := mockWriteRequest(tc.inputSeries, 1, 1) + req := mockWriteRequest(tc.inputSeries, 1, 1, false) _, err = ds[0].Push(ctx, req) require.NoError(t, err) @@ -3613,7 +3793,7 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing }) // Push the series to the distributor - req := mockWriteRequest(inputSeries, 1, 
1) + req := mockWriteRequest(inputSeries, 1, 1, false) ctx := user.InjectOrgID(context.Background(), "userDistributorPushRelabelDropWillExportMetricOfDroppedSamples") _, err = ds[0].Push(ctx, req) require.NoError(t, err) diff --git a/pkg/ingester/client/compat.go b/pkg/ingester/client/compat.go index fb3969a94bc..cbac06a46fb 100644 --- a/pkg/ingester/client/compat.go +++ b/pkg/ingester/client/compat.go @@ -113,25 +113,6 @@ func ToQueryResponse(matrix model.Matrix) *QueryResponse { return resp } -// FromQueryResponse unpacks a QueryResponse proto. -func FromQueryResponse(resp *QueryResponse) model.Matrix { - m := make(model.Matrix, 0, len(resp.Timeseries)) - for _, ts := range resp.Timeseries { - var ss model.SampleStream - ss.Metric = cortexpb.FromLabelAdaptersToMetric(ts.Labels) - ss.Values = make([]model.SamplePair, 0, len(ts.Samples)) - for _, s := range ts.Samples { - ss.Values = append(ss.Values, model.SamplePair{ - Value: model.SampleValue(s.Value), - Timestamp: model.Time(s.TimestampMs), - }) - } - m = append(m, &ss) - } - - return m -} - // ToMetricsForLabelMatchersRequest builds a MetricsForLabelMatchersRequest proto func ToMetricsForLabelMatchersRequest(from, to model.Time, matchers []*labels.Matcher) (*MetricsForLabelMatchersRequest, error) { ms, err := toLabelMatchers(matchers) diff --git a/pkg/ingester/client/compat_test.go b/pkg/ingester/client/compat_test.go index 56483abedd4..c9467abd0af 100644 --- a/pkg/ingester/client/compat_test.go +++ b/pkg/ingester/client/compat_test.go @@ -1,9 +1,7 @@ package client import ( - "fmt" "reflect" - "sort" "strconv" "testing" @@ -75,36 +73,6 @@ func matchersEqual(expected, actual []*labels.Matcher) bool { return true } -func buildTestMatrix(numSeries int, samplesPerSeries int, offset int) model.Matrix { - m := make(model.Matrix, 0, numSeries) - for i := 0; i < numSeries; i++ { - ss := model.SampleStream{ - Metric: model.Metric{ - model.MetricNameLabel: model.LabelValue(fmt.Sprintf("testmetric_%d", i)), - model.JobLabel: "testjob", - }, - Values: make([]model.SamplePair, 0, samplesPerSeries), - } - for j := 0; j < samplesPerSeries; j++ { - ss.Values = append(ss.Values, model.SamplePair{ - Timestamp: model.Time(i + j + offset), - Value: model.SampleValue(i + j + offset), - }) - } - m = append(m, &ss) - } - sort.Sort(m) - return m -} - -func TestQueryResponse(t *testing.T) { - want := buildTestMatrix(10, 10, 10) - have := FromQueryResponse(ToQueryResponse(want)) - if !reflect.DeepEqual(have, want) { - t.Fatalf("Bad FromQueryResponse(ToQueryResponse) round trip") - } -} - // The main usecase for `LabelsToKeyString` is to generate hashKeys // for maps. We are benchmarking that here. func BenchmarkSeriesMap(b *testing.B) { diff --git a/pkg/querier/batch/batch.go b/pkg/querier/batch/batch.go index cc6bf466ea4..ca7e1f79eee 100644 --- a/pkg/querier/batch/batch.go +++ b/pkg/querier/batch/batch.go @@ -162,6 +162,11 @@ func (a *iteratorAdapter) AtHistogram(h *histogram.Histogram) (int64, *histogram // AtFloatHistogram implements chunkenc.Iterator. func (a *iteratorAdapter) AtFloatHistogram(h *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + // PromQL engine always selects float histogram in its implementation so might call AtFloatHistogram + // even if it is a histogram. 
https://github.com/prometheus/prometheus/blob/v2.53.0/promql/engine.go#L2276 + if a.curr.ValType == chunkenc.ValHistogram { + return a.curr.Timestamps[a.curr.Index], a.curr.Histograms[a.curr.Index].ToFloat(h) + } return a.curr.Timestamps[a.curr.Index], a.curr.FloatHistograms[a.curr.Index] } diff --git a/pkg/querier/batch/batch_test.go b/pkg/querier/batch/batch_test.go index ffd2c248b2c..0542f47ddb0 100644 --- a/pkg/querier/batch/batch_test.go +++ b/pkg/querier/batch/batch_test.go @@ -147,6 +147,11 @@ func TestSeekCorrectlyDealWithSinglePointChunks(t *testing.T) { actual, val := sut.AtHistogram(nil) require.Equal(t, histograms[0], val) require.Equal(t, int64(1*time.Second/time.Millisecond), actual) + + // Histogram chunk should support querying float histograms since it is what Query Engine does. + actualT, fh := sut.AtFloatHistogram(nil) + require.Equal(t, histograms[0].ToFloat(nil), fh) + require.Equal(t, int64(1*time.Second/time.Millisecond), actualT) case promchunk.PrometheusFloatHistogramChunk: actual, val := sut.AtFloatHistogram(nil) require.Equal(t, histograms[0].ToFloat(nil), val) diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go deleted file mode 100644 index 649d8fa01de..00000000000 --- a/pkg/querier/block_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package querier - -import ( - "sort" - - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/thanos-io/thanos/pkg/store/storepb" -) - -func createAggrChunkWithSamples(samples ...promql.FPoint) storepb.AggrChunk { - return createAggrChunk(samples[0].T, samples[len(samples)-1].T, samples...) -} - -func createAggrChunk(minTime, maxTime int64, samples ...promql.FPoint) storepb.AggrChunk { - // Ensure samples are sorted by timestamp. 
- sort.Slice(samples, func(i, j int) bool { - return samples[i].T < samples[j].T - }) - - chunk := chunkenc.NewXORChunk() - appender, err := chunk.Appender() - if err != nil { - panic(err) - } - - for _, s := range samples { - appender.Append(s.T, s.F) - } - - return storepb.AggrChunk{ - MinTime: minTime, - MaxTime: maxTime, - Raw: &storepb.Chunk{ - Type: storepb.Chunk_XOR, - Data: chunk.Bytes(), - }, - } -} diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 3ff14fbca16..f80914e9e2b 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -3,7 +3,6 @@ package querier import ( "context" "fmt" - "io" "sort" "strings" @@ -17,6 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -41,6 +41,7 @@ import ( "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/util" + histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -56,19 +57,27 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { ) var ( - block1 = ulid.MustNew(1, nil) - block2 = ulid.MustNew(2, nil) - block3 = ulid.MustNew(3, nil) - block4 = ulid.MustNew(4, nil) - metricNameLabel = labels.Label{Name: labels.MetricName, Value: metricName} - series1Label = labels.Label{Name: "series", Value: "1"} - series2Label = labels.Label{Name: "series", Value: "2"} - noOpQueryLimiter = limiter.NewQueryLimiter(0, 0, 0, 0) + block1 = ulid.MustNew(1, nil) + block2 = ulid.MustNew(2, nil) + block3 = ulid.MustNew(3, nil) + block4 = ulid.MustNew(4, nil) + metricNameLabel = labels.Label{Name: labels.MetricName, Value: metricName} + series1Label = labels.Label{Name: "series", Value: "1"} + series2Label = labels.Label{Name: "series", Value: "2"} + noOpQueryLimiter = limiter.NewQueryLimiter(0, 0, 0, 0) + testHistogram1 = histogram_util.GenerateTestHistogram(1) + testHistogram2 = histogram_util.GenerateTestHistogram(2) + testHistogram3 = histogram_util.GenerateTestHistogram(3) + testFloatHistogram1 = histogram_util.GenerateTestFloatHistogram(1) + testFloatHistogram2 = histogram_util.GenerateTestFloatHistogram(2) + testFloatHistogram3 = histogram_util.GenerateTestFloatHistogram(3) ) type valueResult struct { - t int64 - v float64 + t int64 + v float64 + h *histogram.Histogram + fh *histogram.FloatHistogram } type seriesResult struct { @@ -118,7 +127,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -135,6 +144,70 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "a 
single store-gateway instance holds the required blocks (single returned histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram2}, + }, + }, + }, + }, + "a single store-gateway instance holds the required blocks (single returned float histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram2}, + }, + }, + }, + }, "a single store-gateway instance holds the required blocks (multiple returned series)": { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, @@ -143,8 +216,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 3, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -166,6 +239,94 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "a single store-gateway instance holds the required blocks (multiple returned histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + 
cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram3), + }, nil, + ), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram2}, + }, + }, { + lbls: labels.New(metricNameLabel, series2Label), + values: []valueResult{ + {t: minT, h: testHistogram3}, + }, + }, + }, + }, + "a single store-gateway instance holds the required blocks (multiple returned float histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram3), + }, + ), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram2}, + }, + }, { + lbls: labels.New(metricNameLabel, series2Label), + values: []valueResult{ + {t: minT, fh: testFloatHistogram3}, + }, + }, + }, + }, "multiple store-gateway instances holds the required blocks without overlapping series (single returned series)": { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, @@ -174,11 +335,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, cortexpb.Sample{Value: 1, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -195,6 +356,88 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "multiple store-gateway instances holds the required blocks without overlapping series (single returned histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: 
[]*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + }, nil, + ), + mockHintsResponse(block1), + }}: {block1}, + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block2), + }}: {block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram2}, + }, + }, + }, + }, + "multiple store-gateway instances holds the required blocks without overlapping series (single returned float histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + }, + ), + mockHintsResponse(block1), + }}: {block1}, + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockHintsResponse(block2), + }}: {block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram2}, + }, + }, + }, + }, "multiple store-gateway instances holds the required blocks with overlapping series (single returned series)": { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, @@ -203,11 +446,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -224,6 +467,90 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "multiple store-gateway instances holds the required blocks with overlapping series (single returned histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + 
&storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block1), + }}: {block1}, + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block2), + }}: {block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram2}, + }, + }, + }, + }, + "multiple store-gateway instances holds the required blocks with overlapping series (single returned float histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockHintsResponse(block1), + }}: {block1}, + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockHintsResponse(block2), + }}: {block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram2}, + }, + }, + }, + }, "multiple store-gateway instances holds the required blocks with overlapping series (multiple returned series)": { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, @@ -232,16 +559,16 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 1, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, 
TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3}, }, @@ -291,6 +618,138 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { cortex_querier_storegateway_refetches_per_query_count 1 `, }, + "multiple store-gateway instances holds the required blocks with overlapping series (multiple returned histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + }, nil, + ), + mockHintsResponse(block1), + }}: {block1}, + &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block2), + }}: {block2}, + &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram3), + }, nil, + ), + mockHintsResponse(block3), + }}: {block3}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram2}, + }, + }, { + lbls: labels.New(metricNameLabel, series2Label), + values: []valueResult{ + {t: minT, h: testHistogram1}, + {t: minT + 1, h: testHistogram3}, + }, + }, + }, + }, + "multiple store-gateway instances holds the required blocks with overlapping series (multiple returned float histogram series)": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + }, + ), + mockHintsResponse(block1), + }}: {block1}, + 
&storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series1Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), + mockHintsResponse(block2), + }}: {block2}, + &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse( + labels.Labels{metricNameLabel, series2Label}, + nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram3), + }, + ), + mockHintsResponse(block3), + }}: {block3}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: labels.New(metricNameLabel, series1Label), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram2}, + }, + }, { + lbls: labels.New(metricNameLabel, series2Label), + values: []valueResult{ + {t: minT, fh: testFloatHistogram1}, + {t: minT + 1, fh: testFloatHistogram3}, + }, + }, + }, + }, "a single store-gateway instance has some missing blocks (consistency check failed)": { finderResult: bucketindex.Blocks{ &bucketindex.Block{ID: block1}, @@ -300,7 +759,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -322,11 +781,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -348,25 +807,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 1, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 3, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -424,7 +883,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -449,7 +908,47 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 1}, + queryLimiter: noOpQueryLimiter, + expectedErr: validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, fmt.Sprintf("{__name__=%q}", metricName), 1)), + }, + "max chunks per query limit hit while fetching histogram chunks at first attempt": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", 
mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 1}, + queryLimiter: noOpQueryLimiter, + expectedErr: validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, fmt.Sprintf("{__name__=%q}", metricName), 1)), + }, + "max chunks per query limit hit while fetching float histogram chunks at first attempt": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -466,7 +965,47 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: limiter.NewQueryLimiter(0, 0, 1, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunksPerQueryLimit, 1)), + }, + "max chunks per query limit hit while fetching histogram chunks at first attempt - global limit": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: limiter.NewQueryLimiter(0, 0, 1, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunksPerQueryLimit, 1)), + }, + "max chunks per query limit hit while fetching float histogram chunks at first attempt - global limit": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + 
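// Each proto histogram is encoded by mockSeriesResponse as its own single-sample chunk, so this series yields two chunks and exceeds the one-chunk limit used by this case. +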
}), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -486,25 +1025,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 1, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 3, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -524,25 +1063,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 1, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. 
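// After this attempt every expected block has been returned, so the consistency check passes.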
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 3, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -559,8 +1098,60 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 1, TimestampMs: minT}), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, cortexpb.Sample{Value: 2, TimestampMs: minT + 1}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: limiter.NewQueryLimiter(1, 0, 0, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxSeriesHit, 1)), + }, + "max series per query limit hit while fetching histogram chunks": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + }, nil, + ), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), + }, nil, + ), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: limiter.NewQueryLimiter(1, 0, 0, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxSeriesHit, 1)), + }, + "max series per query limit hit while fetching float histogram chunks": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + }, + ), + mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, nil, + []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), + }, + ), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -577,7 +1168,45 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + 
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 0}, + queryLimiter: limiter.NewQueryLimiter(0, 8, 0, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, 8)), + }, + "max chunk bytes per query limit hit while fetching histogram chunks": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + }, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 0}, + queryLimiter: limiter.NewQueryLimiter(0, 8, 0, 0), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, 8)), + }, + "max chunk bytes per query limit hit while fetching float histogram chunks": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + }), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -594,7 +1223,45 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}...), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 0}, + queryLimiter: limiter.NewQueryLimiter(0, 0, 0, 1), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, 1)), + }, + "max data bytes per query limit hit while fetching histogram chunks": { + finderResult: bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(minT, testHistogram1), + }, nil), + mockHintsResponse(block1, block2), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{maxChunksPerQuery: 0}, + queryLimiter: limiter.NewQueryLimiter(0, 0, 0, 1), + expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxDataBytesHit, 1)), + }, + "max data bytes per query limit hit while fetching float histogram chunks": { + finderResult: bucketindex.Blocks{ + 
&bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), + }), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -616,7 +1283,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -645,7 +1312,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -674,7 +1341,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -700,7 +1367,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &storeGatewayClientMock{ remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -710,7 +1377,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &storeGatewayClientMock{ remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -738,13 +1405,13 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { remoteAddr: "1.1.1.1", mockedSeriesStreamErr: status.Error(codes.Unavailable, "unavailable"), mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, map[BlocksStoreClient][]ulid.ULID{ 
&storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -773,7 +1440,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -802,7 +1469,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, cortexpb.Sample{Value: 2, TimestampMs: minT}), + mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -858,12 +1525,28 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { var actualValues []valueResult it = set.At().Iterator(it) - for it.Next() != chunkenc.ValNone { - t, v := it.At() - actualValues = append(actualValues, valueResult{ - t: t, - v: v, - }) + for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { + switch valType { + case chunkenc.ValFloat: + t, v := it.At() + actualValues = append(actualValues, valueResult{ + t: t, + v: v, + }) + case chunkenc.ValHistogram: + t, v := it.AtHistogram(nil) + actualValues = append(actualValues, valueResult{ + t: t, + h: v, + }) + case chunkenc.ValFloatHistogram: + t, v := it.AtFloatHistogram(nil) + actualValues = append(actualValues, valueResult{ + t: t, + fh: v, + }) + default: + } } require.NoError(t, it.Err()) @@ -1485,124 +2168,130 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { series1 := []labelpb.ZLabel{{Name: "__name__", Value: "metric_1"}} series2 := []labelpb.ZLabel{{Name: "__name__", Value: "metric_2"}} - series1Samples := []promql.FPoint{ - {T: 1589759955000, F: 1}, - {T: 1589759970000, F: 1}, - {T: 1589759985000, F: 1}, - {T: 1589760000000, F: 1}, - {T: 1589760015000, F: 1}, - {T: 1589760030000, F: 1}, - } + from := model.Time(1589759955000) - series2Samples := []promql.FPoint{ - {T: 1589759955000, F: 2}, - {T: 1589759970000, F: 2}, - {T: 1589759985000, F: 2}, - {T: 1589760000000, F: 2}, - {T: 1589760015000, F: 2}, - {T: 1589760030000, F: 2}, - } for _, thanosEngine := range []bool{false, true} { - t.Run(fmt.Sprintf("thanos engine enabled=%t", thanosEngine), func(t *testing.T) { - var queryEngine promql.QueryEngine - if thanosEngine { - queryEngine = engine.New(engine.Opts{ - EngineOpts: opts, - LogicalOptimizers: logicalplan.AllOptimizers, - }) - } else { + for _, enc := range encodings { + t.Run(fmt.Sprintf("thanos engine enabled=%t, enc=%s", thanosEngine, enc.String()), func(t *testing.T) { + var queryEngine promql.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } queryEngine = promql.NewEngine(opts) - } - // Mock the finder to simulate we need to 
query two blocks. - finder := &blocksFinderMock{ - Service: services.NewIdleService(nil, nil), - } - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ - &bucketindex.Block{ID: block1}, - &bucketindex.Block{ID: block2}, - }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) - - // Mock the store to simulate each block is queried from a different store-gateway. - gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series1, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series1Samples[:3]...), // First half. + // Mock the finder to simulate we need to query two blocks. + finder := &blocksFinderMock{ + Service: services.NewIdleService(nil, nil), + } + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + &bucketindex.Block{ID: block1}, + &bucketindex.Block{ID: block2}, + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + + // Mock the store to simulate each block is queried from a different store-gateway. + gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series1, + Chunks: []storepb.AggrChunk{ + createAggrChunk(t, 15*time.Second, from, 3, enc), + }, }, }, }, - }, { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series2, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series2Samples[:3]...), + { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series2, + Chunks: []storepb.AggrChunk{ + createAggrChunk(t, 15*time.Second, from, 3, enc), + }, }, }, }, - }, - mockHintsResponse(block1), - }} - - gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series1, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series1Samples[3:]...), // Second half. + mockHintsResponse(block1), + }} + + gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ + { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series1, + Chunks: []storepb.AggrChunk{ + createAggrChunk(t, 15*time.Second, from+45000, 3, enc), + }, }, }, - }, - }, { - Result: &storepb.SeriesResponse_Series{ - Series: &storepb.Series{ - Labels: series2, - Chunks: []storepb.AggrChunk{ - createAggrChunkWithSamples(series2Samples[3:]...), + }, { + Result: &storepb.SeriesResponse_Series{ + Series: &storepb.Series{ + Labels: series2, + Chunks: []storepb.AggrChunk{ + createAggrChunk(t, 15*time.Second, from+45000, 3, enc), + }, }, }, }, - }, - mockHintsResponse(block2), - }} - - stores := &blocksStoreSetMock{ - Service: services.NewIdleService(nil, nil), - mockedResponses: []interface{}{ - map[BlocksStoreClient][]ulid.ULID{ - gateway1: {block1}, - gateway2: {block2}, + mockHintsResponse(block2), + }} + + stores := &blocksStoreSetMock{ + Service: services.NewIdleService(nil, nil), + mockedResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + gateway1: {block1}, + gateway2: {block2}, + }, }, - }, - } - - // Instance the querier that will be executed to run the query. 
- queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, false, logger, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) - defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck - - // Run a query. - ctx := user.InjectOrgID(context.Background(), "user-1") - q, err := queryEngine.NewRangeQuery(ctx, queryable, nil, `{__name__=~"metric.*"}`, time.Unix(1589759955, 0), time.Unix(1589760030, 0), 15*time.Second) - require.NoError(t, err) - - res := q.Exec(ctx) - require.NoError(t, err) - require.NoError(t, res.Err) + } - matrix, err := res.Matrix() - require.NoError(t, err) - require.Len(t, matrix, 2) + + // Instantiate the querier that will be executed to run the query. + queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, false, logger, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) + defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck - assert.Equal(t, labelpb.ZLabelsToPromLabels(series1), matrix[0].Metric) - assert.Equal(t, labelpb.ZLabelsToPromLabels(series2), matrix[1].Metric) - assert.Equal(t, series1Samples, matrix[0].Floats) - assert.Equal(t, series2Samples, matrix[1].Floats) - }) + // Run a query. + ctx := user.InjectOrgID(context.Background(), "user-1") + q, err := queryEngine.NewRangeQuery(ctx, queryable, nil, `{__name__=~"metric.*"}`, time.Unix(1589759955, 0), time.Unix(1589760030, 0), 15*time.Second) + require.NoError(t, err) + + res := q.Exec(ctx) + require.NoError(t, err) + require.NoError(t, res.Err) + + matrix, err := res.Matrix() + require.NoError(t, err) + require.Len(t, matrix, 2) + + require.Equal(t, labelpb.ZLabelsToPromLabels(series1), matrix[0].Metric) + require.Equal(t, labelpb.ZLabelsToPromLabels(series2), matrix[1].Metric) + for _, m := range matrix { + for i, f := range m.Floats { + // Check the sample timestamp is as expected. + require.Equal(t, f.T, int64(from)+int64(i)*15000) + require.Equal(t, f.T, int64(f.F)) + } + for i, h := range m.Histograms { + h := h + // Check the sample timestamp is as expected. + require.Equal(t, h.T, int64(from)+int64(i)*15000) + if enc == encoding.PrometheusHistogramChunk { + // GenerateTestHistogram adds 10 to the input value i, so subtract 10 here to recover it. + expectedH := histogram_util.GenerateTestHistogram(int(h.T - 10)) + require.Equal(t, expectedH.ToFloat(nil), h.H) + } else if enc == encoding.PrometheusFloatHistogramChunk { + // GenerateTestFloatHistogram likewise adds 10 to the input value i, so subtract 10 here to recover it.
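+ // (Assumption: util.GenerateChunk derives each generated sample's value from its timestamp, which is why h.T can be fed back into the generators here.)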
+ expectedH := histogram_util.GenerateTestFloatHistogram(int(h.T - 10)) + require.Equal(t, expectedH, h.H) + } + } + } + }) + } } } @@ -1717,7 +2406,7 @@ func (m *blocksStoreLimitsMock) S3SSEKMSEncryptionContext(_ string) string { return "" } -func mockSeriesResponse(lbls labels.Labels, samples ...cortexpb.Sample) *storepb.SeriesResponse { +func mockSeriesResponse(lbls labels.Labels, samples []cortexpb.Sample, histograms []cortexpb.Histogram, floatHistograms []cortexpb.Histogram) *storepb.SeriesResponse { res := &storepb.SeriesResponse_Series{ Series: &storepb.Series{ Labels: labelpb.ZLabelsFromPromLabels(lbls), @@ -1741,6 +2430,42 @@ func mockSeriesResponse(lbls labels.Labels, samples ...cortexpb.Sample) *storepb }) } + for _, h := range histograms { + chunk := chunkenc.NewHistogramChunk() + appender, err := chunk.Appender() + if err != nil { + panic(err) + } + _, _, _, err = appender.AppendHistogram(nil, h.TimestampMs, cortexpb.HistogramProtoToHistogram(h), true) + if err != nil { + panic(err) + } + chunkData := chunk.Bytes() + res.Series.Chunks = append(res.Series.Chunks, storepb.AggrChunk{ + MinTime: h.TimestampMs, + MaxTime: h.TimestampMs, + Raw: &storepb.Chunk{Type: storepb.Chunk_HISTOGRAM, Data: chunkData}, + }) + } + + for _, fh := range floatHistograms { + chunk := chunkenc.NewFloatHistogramChunk() + appender, err := chunk.Appender() + if err != nil { + panic(err) + } + _, _, _, err = appender.AppendFloatHistogram(nil, fh.TimestampMs, cortexpb.FloatHistogramProtoToFloatHistogram(fh), true) + if err != nil { + panic(err) + } + chunkData := chunk.Bytes() + res.Series.Chunks = append(res.Series.Chunks, storepb.AggrChunk{ + MinTime: fh.TimestampMs, + MaxTime: fh.TimestampMs, + Raw: &storepb.Chunk{Type: storepb.Chunk_FLOAT_HISTOGRAM, Data: chunkData}, + }) + } + return &storepb.SeriesResponse{Result: res} } @@ -1910,3 +2635,23 @@ func TestCountSamplesAndChunks(t *testing.T) { }) } } + +func createAggrChunk(t *testing.T, step time.Duration, from model.Time, points int, enc encoding.Encoding) storepb.AggrChunk { + c := util.GenerateChunk(t, step, from, points, enc) + chunkType := storepb.Chunk_XOR + switch enc { + case encoding.PrometheusHistogramChunk: + chunkType = storepb.Chunk_HISTOGRAM + case encoding.PrometheusFloatHistogramChunk: + chunkType = storepb.Chunk_FLOAT_HISTOGRAM + default: + } + return storepb.AggrChunk{ + MinTime: int64(c.From), + MaxTime: int64(c.Through), + Raw: &storepb.Chunk{ + Type: chunkType, + Data: c.Data.Bytes(), + }, + } +} diff --git a/pkg/querier/chunk_store_queryable_test.go b/pkg/querier/chunk_store_queryable_test.go index 2d32769a525..1ecbb438d6d 100644 --- a/pkg/querier/chunk_store_queryable_test.go +++ b/pkg/querier/chunk_store_queryable_test.go @@ -2,6 +2,7 @@ package querier import ( "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/chunk" @@ -18,14 +19,18 @@ func (m mockChunkStore) Get() ([]chunk.Chunk, error) { } func makeMockChunkStore(t require.TestingT, numChunks int, enc encoding.Encoding) (mockChunkStore, model.Time) { + chks, from := makeMockChunks(t, numChunks, enc, 0) + return mockChunkStore{chks}, from +} + +func makeMockChunks(t require.TestingT, numChunks int, enc encoding.Encoding, from model.Time, additionalLabels ...labels.Label) ([]chunk.Chunk, model.Time) { var ( chunks = make([]chunk.Chunk, 0, numChunks) - from = model.Time(0) ) for i := 0; i < numChunks; i++ { - c := util.GenerateChunk(t, sampleRate, from, 
int(samplesPerChunk), enc) + c := util.GenerateChunk(t, sampleRate, from, int(samplesPerChunk), enc, additionalLabels...) chunks = append(chunks, c) from = from.Add(chunkOffset) } - return mockChunkStore{chunks}, from + return chunks, from } diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index 58dd56b8618..6740d1406b5 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -9,7 +9,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -146,56 +145,55 @@ func TestDistributorQueryableFilter(t *testing.T) { func TestIngesterStreaming(t *testing.T) { t.Parallel() - // We need to make sure that there is at least one chunk present, - // else no series will be selected. - promChunk := chunkenc.NewXORChunk() - appender, err := promChunk.Appender() - require.NoError(t, err) - appender.Append(int64(model.ZeroSamplePair.Timestamp), float64(model.ZeroSamplePair.Value)) - - clientChunks, err := chunkcompat.ToChunks([]chunk.Chunk{ - chunk.NewChunk(nil, promChunk, model.Earliest, model.Earliest), - }) - require.NoError(t, err) - - d := &MockDistributor{} - d.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( - &client.QueryStreamResponse{ - Chunkseries: []client.TimeSeriesChunk{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "bar", Value: "baz"}, + now := time.Now() + for _, enc := range encodings { + promChunk := util.GenerateChunk(t, time.Second, model.TimeFromUnix(now.Unix()), 10, enc) + clientChunks, err := chunkcompat.ToChunks([]chunk.Chunk{promChunk}) + require.NoError(t, err) + + d := &MockDistributor{} + d.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( + &client.QueryStreamResponse{ + Chunkseries: []client.TimeSeriesChunk{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: "bar", Value: "baz"}, + }, + Chunks: clientChunks, }, - Chunks: clientChunks, - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: "foo", Value: "bar"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: "foo", Value: "bar"}, + }, + Chunks: clientChunks, }, - Chunks: clientChunks, }, }, - }, - nil) - - ctx := user.InjectOrgID(context.Background(), "0") - queryable := newDistributorQueryable(d, true, batch.NewChunkMergeIterator, 0) - querier, err := queryable.Querier(mint, maxt) - require.NoError(t, err) - - seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}) - require.NoError(t, seriesSet.Err()) - - require.True(t, seriesSet.Next()) - series := seriesSet.At() - require.Equal(t, labels.Labels{{Name: "bar", Value: "baz"}}, series.Labels()) - - require.True(t, seriesSet.Next()) - series = seriesSet.At() - require.Equal(t, labels.Labels{{Name: "foo", Value: "bar"}}, series.Labels()) - - require.False(t, seriesSet.Next()) - require.NoError(t, seriesSet.Err()) + nil) + + ctx := user.InjectOrgID(context.Background(), "0") + queryable := newDistributorQueryable(d, true, batch.NewChunkMergeIterator, 0) + querier, err := queryable.Querier(mint, maxt) + require.NoError(t, err) + + seriesSet := querier.Select(ctx, true, &storage.SelectHints{Start: mint, End: maxt}) + require.NoError(t, seriesSet.Err()) + + require.True(t, seriesSet.Next()) + series := seriesSet.At() + require.Equal(t, 
labels.Labels{{Name: "bar", Value: "baz"}}, series.Labels()) + chkIter := series.Iterator(nil) + require.Equal(t, enc.ChunkValueType(), chkIter.Next()) + + require.True(t, seriesSet.Next()) + series = seriesSet.At() + require.Equal(t, labels.Labels{{Name: "foo", Value: "bar"}}, series.Labels()) + chkIter = series.Iterator(chkIter) + require.Equal(t, enc.ChunkValueType(), chkIter.Next()) + + require.False(t, seriesSet.Next()) + require.NoError(t, seriesSet.Err()) + } } func TestDistributorQuerier_LabelNames(t *testing.T) { diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index bbb46e88a26..ac01c5e9ce6 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -375,20 +375,21 @@ func (q querier) Select(ctx context.Context, sortSeries bool, sp *storage.Select // so we make sure changes are reflected back to hints. sp.Start = startMs sp.End = endMs + getSeries := sp.Func == "series" // For series queries without specifying the start time, we prefer to // only query ingesters and not to query maxQueryLength to avoid OOM kill. - if sp.Func == "series" && startMs == 0 { + if getSeries && startMs == 0 { return metadataQuerier.Select(ctx, true, sp, matchers...) } startTime := model.Time(startMs) endTime := model.Time(endMs) - // Validate query time range. This validation should be done only for instant / range queries and - // NOT for metadata queries (series, labels) because the query-frontend doesn't support splitting - // of such queries. - if !q.ignoreMaxQueryLength { + // Validate query time range. This validation for instant / range queries can be done either at Query Frontend + // or here at Querier. When the check is done at Query Frontend, we still want to enforce the max query length + // check for /api/v1/series request since there is no specific tripperware for series. 
+ if !q.ignoreMaxQueryLength || getSeries { if maxQueryLength := q.limits.MaxQueryLength(userID); maxQueryLength > 0 && endTime.Sub(startTime) > maxQueryLength { limitErr := validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, endTime.Sub(startTime), maxQueryLength)) return storage.ErrSeriesSet(limitErr) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 55d5ec02014..e53cac4e3e6 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -11,12 +11,13 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + promutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/annotations" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -33,10 +34,8 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/flagext" + histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" "github.com/cortexproject/cortex/pkg/util/validation" - - "github.com/prometheus/client_golang/prometheus" - promutil "github.com/prometheus/client_golang/prometheus/testutil" ) const ( @@ -69,11 +68,12 @@ func (q *wrappedSampleAndChunkQueryable) Querier(mint, maxt int64) (storage.Quer } type query struct { - query string - labels labels.Labels - samples func(from, through time.Time, step time.Duration) int - expected func(t int64) (int64, float64) - step time.Duration + query string + labels labels.Labels + samples func(from, through time.Time, step time.Duration) int + expectedFunc func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) + step time.Duration + encodings []promchunk.Encoding } var ( @@ -93,9 +93,24 @@ var ( samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from) / step) }, - expected: func(t int64) (int64, float64) { - return t + int64((sampleRate*4)/time.Millisecond), 1000.0 + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + expected := func(t int64) (int64, float64) { + return t + int64((sampleRate*4)/time.Millisecond), 1000.0 + } + from, through := time.Unix(0, 0), end.Time() + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + expectedTime, expectedValue := expected(ts) + require.Equal(t, expectedTime, point.T, strconv.Itoa(i)) + require.Equal(t, expectedValue, point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } }, + // Skip running the query for non XOR chunk encoding to avoid putting + // histogram rate calculation logic here. + // We can test histogram queries using different promql functions. + encodings: []promchunk.Encoding{promchunk.PrometheusXorChunk}, }, // Very simple single-point gets, with low step. 
Performance should be @@ -109,8 +124,27 @@ var ( samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, - expected: func(t int64) (int64, float64) { - return t, float64(t) + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + from, through := time.Unix(0, 0), end.Time() + switch enc { + case promchunk.PrometheusXorChunk: + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + require.Equal(t, ts, point.T, strconv.Itoa(i)) + require.Equal(t, float64(ts), point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } + case promchunk.PrometheusHistogramChunk, promchunk.PrometheusFloatHistogramChunk: + require.Equal(t, q.samples(from, through, q.step), len(series.Histograms)) + for i, point := range series.Histograms { + require.Equal(t, ts, point.T, strconv.Itoa(i)) + // Convert expected value to float histogram. + expectedH := histogram_util.GenerateTestFloatHistogram(int(ts - 10)) + require.Equal(t, expectedH, point.H, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } + } }, }, @@ -122,9 +156,24 @@ var ( samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from) / step) }, - expected: func(t int64) (int64, float64) { - return t + int64((sampleRate*4)/time.Millisecond)*10, 1000.0 + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + expected := func(t int64) (int64, float64) { + return t + int64((sampleRate*4)/time.Millisecond)*10, 1000.0 + } + from, through := time.Unix(0, 0), end.Time() + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + expectedTime, expectedValue := expected(ts) + require.Equal(t, expectedTime, point.T, strconv.Itoa(i)) + require.Equal(t, expectedValue, point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } }, + // Skip running the query for non XOR chunk encoding to avoid putting + // histogram rate calculation logic here. + // We can test histogram queries using different promql functions. + encodings: []promchunk.Encoding{promchunk.PrometheusXorChunk}, }, // Single points gets with large step; excersise Seek performance. @@ -137,9 +186,70 @@ var ( samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, - expected: func(t int64) (int64, float64) { - return t, float64(t) + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + from, through := time.Unix(0, 0), end.Time() + switch enc { + case promchunk.PrometheusXorChunk: + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + require.Equal(t, ts, point.T, strconv.Itoa(i)) + require.Equal(t, float64(ts), point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } + case promchunk.PrometheusHistogramChunk, promchunk.PrometheusFloatHistogramChunk: + require.Equal(t, q.samples(from, through, q.step), len(series.Histograms)) + for i, point := range series.Histograms { + require.Equal(t, ts, point.T, strconv.Itoa(i)) + // Convert expected value to float histogram. 
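+ // Integer and float histogram chunks both decode to float histograms in query results,
+ // so a single expected value covers both encodings.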
+ expectedH := histogram_util.GenerateTestFloatHistogram(int(ts - 10)) + require.Equal(t, expectedH, point.H, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } + } + }, + }, + + { + query: "histogram_sum(foo)", + step: sampleRate * 4 * 10, + labels: labels.Labels{}, + samples: func(from, through time.Time, step time.Duration) int { + return int(through.Sub(from)/step) + 1 + }, + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + from, through := time.Unix(0, 0), end.Time() + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + expectedFH := histogram_util.GenerateTestFloatHistogram(int(ts - 10)) + require.Equal(t, ts, point.T, strconv.Itoa(i)) + require.Equal(t, expectedFH.Sum, point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } }, + encodings: []promchunk.Encoding{promchunk.PrometheusHistogramChunk, promchunk.PrometheusFloatHistogramChunk}, + }, + + { + query: "histogram_count(foo)", + step: sampleRate * 4 * 10, + labels: labels.Labels{}, + samples: func(from, through time.Time, step time.Duration) int { + return int(through.Sub(from)/step) + 1 + }, + expectedFunc: func(t testing.TB, q query, end model.Time, enc promchunk.Encoding, series promql.Series) { + var ts int64 + from, through := time.Unix(0, 0), end.Time() + require.Equal(t, q.samples(from, through, q.step), len(series.Floats)) + for i, point := range series.Floats { + expectedFH := histogram_util.GenerateTestFloatHistogram(int(ts - 10)) + require.Equal(t, ts, point.T, strconv.Itoa(i)) + require.Equal(t, expectedFH.Count, point.F, strconv.Itoa(i)) + ts += int64(q.step / time.Millisecond) + } + }, + encodings: []promchunk.Encoding{promchunk.PrometheusHistogramChunk, promchunk.PrometheusFloatHistogramChunk}, }, } ) @@ -154,113 +264,105 @@ func TestShouldSortSeriesIfQueryingMultipleQueryables(t *testing.T) { overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) const chunks = 1 require.NoError(t, err) - - labelsSets := []labels.Labels{ - { - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "1"}, - }, - { - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "2"}, - }, - } - - db, samples := mockTSDB(t, labelsSets, model.Time(start.Unix()*1000), int(chunks*samplesPerChunk), sampleRate, chunkOffset, int(samplesPerChunk)) distributor := &MockDistributor{} - unorderedResponse := client.QueryStreamResponse{ - Chunkseries: []client.TimeSeriesChunk{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "2"}, + for _, enc := range encodings { + chks1, _ := makeMockChunks(t, chunks, enc, model.TimeFromUnix(start.Unix()), labels.Label{Name: "order", Value: "1"}) + chks2, _ := makeMockChunks(t, chunks, enc, model.TimeFromUnix(start.Unix()), labels.Label{Name: "order", Value: "2"}) + + db := NewMockStoreQueryable(&mockChunkStore{chunks: append(chks1, chks2...)}) + + clientChks1, err := chunkcompat.ToChunks(chks1) + require.NoError(t, err) + clientChks2, err := chunkcompat.ToChunks(chks2) + require.NoError(t, err) + unorderedResponse := client.QueryStreamResponse{ + Chunkseries: []client.TimeSeriesChunk{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "order", Value: "2"}, + }, + Chunks: clientChks2, }, - Chunks: ConvertToChunks(t, samples, nil), - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: 
"foo"}, - {Name: "order", Value: "1"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "order", Value: "1"}, + }, + Chunks: clientChks1, }, - Chunks: ConvertToChunks(t, samples, nil), }, - }, - } + } - distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unorderedResponse, nil) - distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) + distributor.On("QueryStream", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&unorderedResponse, nil) + distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) - tCases := []struct { - name string - distributorQueryable QueryableWithFilter - storeQueriables []QueryableWithFilter - sorted bool - }{ - { - name: "should sort if querying 2 queryables", - distributorQueryable: distributorQueryable, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(db)}, - sorted: true, - }, - { - name: "should not sort if querying only ingesters", - distributorQueryable: distributorQueryable, - storeQueriables: []QueryableWithFilter{UseBeforeTimestampQueryable(db, start.Add(-1*time.Hour))}, - sorted: false, - }, - { - name: "should not sort if querying only stores", - distributorQueryable: UseBeforeTimestampQueryable(distributorQueryable, start.Add(-1*time.Hour)), - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(db)}, - sorted: false, - }, - { - name: "should sort if querying 2 queryables with streaming off", - distributorQueryable: distributorQueryable, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(db)}, - sorted: true, - }, - } + tCases := []struct { + name string + distributorQueryable QueryableWithFilter + storeQueriables []QueryableWithFilter + sorted bool + }{ + { + name: "should sort if querying 2 queryables", + distributorQueryable: distributorQueryable, + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(db)}, + sorted: true, + }, + { + name: "should not sort if querying only ingesters", + distributorQueryable: distributorQueryable, + storeQueriables: []QueryableWithFilter{UseBeforeTimestampQueryable(db, start.Add(-1*time.Hour))}, + sorted: false, + }, + { + name: "should not sort if querying only stores", + distributorQueryable: UseBeforeTimestampQueryable(distributorQueryable, start.Add(-1*time.Hour)), + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(db)}, + sorted: false, + }, + } - for _, tc := range tCases { - for _, thanosEngine := range []bool{false, true} { - thanosEngine := thanosEngine - t.Run(tc.name+fmt.Sprintf("thanos engine: %s", strconv.FormatBool(thanosEngine)), func(t *testing.T) { - wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} - var wQueriables []QueryableWithFilter - for _, queryable := range tc.storeQueriables { - wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queryable}) - } - queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides) - opts := promql.EngineOpts{ - Logger: log.NewNopLogger(), - MaxSamples: 1e6, - Timeout: 1 * time.Minute, - } - var queryEngine promql.QueryEngine - if thanosEngine { - queryEngine = engine.New(engine.Opts{ - EngineOpts: opts, - LogicalOptimizers: logicalplan.AllOptimizers, - }) - } else { - queryEngine = promql.NewEngine(opts) - } + 
for _, tc := range tCases { + for _, thanosEngine := range []bool{false, true} { + thanosEngine := thanosEngine + t.Run(tc.name+fmt.Sprintf("thanos engine: %t, encoding=%s", thanosEngine, enc.String()), func(t *testing.T) { + wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} + var wQueriables []QueryableWithFilter + for _, queryable := range tc.storeQueriables { + wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queryable}) + } + queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides) + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } + var queryEngine promql.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } - query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "foo", start, end, 1*time.Minute) - r := query.Exec(ctx) + query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "foo", start, end, 1*time.Minute) + r := query.Exec(ctx) - require.NoError(t, err) - require.Equal(t, 2, r.Value.(promql.Matrix).Len()) + require.NoError(t, err) + require.Equal(t, 2, r.Value.(promql.Matrix).Len()) - for _, queryable := range append(wQueriables, wDistributorQueriable) { - var wQueryable = queryable.(*wrappedSampleAndChunkQueryable) - if wQueryable.UseQueryable(time.Now(), start.Unix()*1000, end.Unix()*1000) { - require.Equal(t, tc.sorted, wQueryable.queriers[0].selectCallsArgs[0][0]) + for _, queryable := range append(wQueriables, wDistributorQueriable) { + var wQueryable = queryable.(*wrappedSampleAndChunkQueryable) + if wQueryable.UseQueryable(time.Now(), start.Unix()*1000, end.Unix()*1000) { + require.Equal(t, tc.sorted, wQueryable.queriers[0].selectCallsArgs[0][0]) + } } - } - }) + }) + } } } } @@ -290,173 +392,152 @@ func TestLimits(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "0") var cfg Config flagext.DefaultValues(&cfg) - const chunks = 1 - labelsSets := []labels.Labels{ - { - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "1"}, - }, - { - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "2"}, - }, - { - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "orders", Value: "3"}, - }, - { - {Name: model.MetricNameLabel, Value: "bar"}, - {Name: "orders", Value: "4"}, - }, - { - {Name: model.MetricNameLabel, Value: "bar"}, - {Name: "orders", Value: "5"}, - }, - } - - _, samples := mockTSDB(t, labelsSets, model.Time(start.Unix()*1000), int(chunks*samplesPerChunk), sampleRate, chunkOffset, int(samplesPerChunk)) - - streamResponse := client.QueryStreamResponse{ - Chunkseries: []client.TimeSeriesChunk{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", Value: "2"}, + for _, enc := range encodings { + chks, _ := makeMockChunks(t, chunks, enc, model.TimeFromUnix(start.Unix())) + clientChks, err := chunkcompat.ToChunks(chks) + require.NoError(t, err) + streamResponse := client.QueryStreamResponse{ + Chunkseries: []client.TimeSeriesChunk{ + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "order", Value: "2"}, + }, + Chunks: clientChks, }, - Chunks: ConvertToChunks(t, samples, nil), - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "order", 
Value: "1"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "order", Value: "1"}, + }, + Chunks: clientChks, }, - Chunks: ConvertToChunks(t, samples, nil), - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "orders", Value: "3"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "foo"}, + {Name: "orders", Value: "3"}, + }, + Chunks: clientChks, }, - Chunks: ConvertToChunks(t, samples, nil), - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "bar"}, - {Name: "orders", Value: "2"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "bar"}, + {Name: "orders", Value: "2"}, + }, + Chunks: clientChks, }, - Chunks: ConvertToChunks(t, samples, nil), - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: model.MetricNameLabel, Value: "bar"}, - {Name: "orders", Value: "1"}, + { + Labels: []cortexpb.LabelAdapter{ + {Name: model.MetricNameLabel, Value: "bar"}, + {Name: "orders", Value: "1"}, + }, + Chunks: clientChks, }, - Chunks: ConvertToChunks(t, samples, nil), }, - }, - } - - distributor := &MockLimitingDistributor{ - response: &streamResponse, - } + } - distributorQueryableStreaming := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) + distributor := &MockLimitingDistributor{ + response: &streamResponse, + } - tCases := []struct { - name string - description string - distributorQueryable QueryableWithFilter - storeQueriables []QueryableWithFilter - tenantLimit validation.TenantLimits - query string - assert func(t *testing.T, r *promql.Result) - }{ - { + distributorQueryableStreaming := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, batch.NewChunkMergeIterator, cfg.QueryIngestersWithin) + + tCases := []struct { + name string + description string + distributorQueryable QueryableWithFilter + storeQueriables []QueryableWithFilter + tenantLimit validation.TenantLimits + query string + assert func(t *testing.T, r *promql.Result) + }{ + { - name: "should result in limit failure for multi-select and an individual select hits the series limit", - description: "query results in multi-select but duplicate finger prints get deduped but still results in # of series greater than limit", - query: "foo + foo", - distributorQueryable: distributorQueryableStreaming, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, - tenantLimit: &tenantLimit{ - MaxFetchedSeriesPerQuery: 2, - }, - assert: func(t *testing.T, r *promql.Result) { - require.Error(t, r.Err) + name: "should result in limit failure for multi-select and an individual select hits the series limit", + description: "query results in multi-select but duplicate finger prints get deduped but still results in # of series greater than limit", + query: "foo + foo", + distributorQueryable: distributorQueryableStreaming, + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, + tenantLimit: &tenantLimit{ + MaxFetchedSeriesPerQuery: 2, + }, + assert: func(t *testing.T, r *promql.Result) { + require.Error(t, r.Err) + }, }, - }, - { + { - name: "should not result in limit failure for multi-select and the query does not hit the series limit", - description: "query results in multi-select but duplicate series finger prints get deduped resulting in # of series within the limit", - query: "foo + foo", - 
distributorQueryable: distributorQueryableStreaming, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, - tenantLimit: &tenantLimit{ - MaxFetchedSeriesPerQuery: 3, - }, - assert: func(t *testing.T, r *promql.Result) { - require.NoError(t, r.Err) + name: "should not result in limit failure for multi-select and the query does not hit the series limit", + description: "query results in multi-select but duplicate series finger prints get deduped resulting in # of series within the limit", + query: "foo + foo", + distributorQueryable: distributorQueryableStreaming, + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, + tenantLimit: &tenantLimit{ + MaxFetchedSeriesPerQuery: 3, + }, + assert: func(t *testing.T, r *promql.Result) { + require.NoError(t, r.Err) + }, }, - }, - { + { - name: "should result in limit failure for multi-select and query hits the series limit", - description: "query results in multi-select but each individual select does not hit the limit but cumulatively the query hits the limit", - query: "foo + bar", - distributorQueryable: distributorQueryableStreaming, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, - tenantLimit: &tenantLimit{ - MaxFetchedSeriesPerQuery: 3, - }, - assert: func(t *testing.T, r *promql.Result) { - require.Error(t, r.Err) + name: "should result in limit failure for multi-select and query hits the series limit", + description: "query results in multi-select but each individual select does not hit the limit but cumulatively the query hits the limit", + query: "foo + bar", + distributorQueryable: distributorQueryableStreaming, + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, + tenantLimit: &tenantLimit{ + MaxFetchedSeriesPerQuery: 3, + }, + assert: func(t *testing.T, r *promql.Result) { + require.Error(t, r.Err) + }, }, - }, - { + { - name: "should not result in limit failure for multi-select and query does not hit the series limit", - description: "query results in multi-select and the cumulative limit is >= series", - query: "foo + bar", - distributorQueryable: distributorQueryableStreaming, - storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, - tenantLimit: &tenantLimit{ - MaxFetchedSeriesPerQuery: 5, - }, - assert: func(t *testing.T, r *promql.Result) { - require.NoError(t, r.Err) + name: "should not result in limit failure for multi-select and query does not hit the series limit", + description: "query results in multi-select and the cumulative limit is >= series", + query: "foo + bar", + distributorQueryable: distributorQueryableStreaming, + storeQueriables: []QueryableWithFilter{UseAlwaysQueryable(distributorQueryableStreaming)}, + tenantLimit: &tenantLimit{ + MaxFetchedSeriesPerQuery: 5, + }, + assert: func(t *testing.T, r *promql.Result) { + require.NoError(t, r.Err) + }, }, - }, - } + } - for i, tc := range tCases { - t.Run(tc.name+fmt.Sprintf(", Test: %d", i), func(t *testing.T) { - wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} - var wQueriables []QueryableWithFilter - for _, queryable := range tc.storeQueriables { - wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queryable}) - } - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), tc.tenantLimit) - require.NoError(t, err) + for i, tc := range tCases { + 
t.Run(tc.name+fmt.Sprintf(", encoding=%s, Test: %d", enc.String(), i), func(t *testing.T) { + wDistributorQueriable := &wrappedSampleAndChunkQueryable{QueryableWithFilter: tc.distributorQueryable} + var wQueriables []QueryableWithFilter + for _, queryable := range tc.storeQueriables { + wQueriables = append(wQueriables, &wrappedSampleAndChunkQueryable{QueryableWithFilter: queryable}) + } + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), tc.tenantLimit) + require.NoError(t, err) - queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides) - opts := promql.EngineOpts{ - Logger: log.NewNopLogger(), - MaxSamples: 1e6, - Timeout: 1 * time.Minute, - } + queryable := NewQueryable(wDistributorQueriable, wQueriables, batch.NewChunkMergeIterator, cfg, overrides) + opts := promql.EngineOpts{ + Logger: log.NewNopLogger(), + MaxSamples: 1e6, + Timeout: 1 * time.Minute, + } - queryEngine := promql.NewEngine(opts) + queryEngine := promql.NewEngine(opts) - query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, tc.query, start, end, 1*time.Minute) - require.NoError(t, err) + query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, tc.query, start, end, 1*time.Minute) + require.NoError(t, err) - r := query.Exec(ctx) + r := query.Exec(ctx) - tc.assert(t, r) - }) + tc.assert(t, r) + }) + } } } @@ -464,15 +545,8 @@ func TestQuerier(t *testing.T) { t.Parallel() var cfg Config flagext.DefaultValues(&cfg) - const chunks = 24 - // Generate TSDB head with the same samples as makeMockChunkStore. - lset := labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo"}, - } - db, _ := mockTSDB(t, []labels.Labels{lset}, model.Time(0), int(chunks*samplesPerChunk), sampleRate, chunkOffset, int(samplesPerChunk)) - opts := promql.EngineOpts{ Logger: log.NewNopLogger(), MaxSamples: 1e6, @@ -480,29 +554,35 @@ func TestQuerier(t *testing.T) { } for _, thanosEngine := range []bool{false, true} { for _, query := range queries { - t.Run(fmt.Sprintf("thanosEngine=%s,query=%s", strconv.FormatBool(thanosEngine), query.query), func(t *testing.T) { - var queryEngine promql.QueryEngine - if thanosEngine { - queryEngine = engine.New(engine.Opts{ - EngineOpts: opts, - LogicalOptimizers: logicalplan.AllOptimizers, - }) - } else { - queryEngine = promql.NewEngine(opts) - } - // Disable active query tracker to avoid mmap error. - cfg.ActiveQueryTrackerDir = "" + encs := encodings + if len(query.encodings) > 0 { + encs = query.encodings + } + for _, enc := range encs { + t.Run(fmt.Sprintf("thanosEngine=%t,encoding=%s,query=%s", thanosEngine, enc.String(), query.query), func(t *testing.T) { + var queryEngine promql.QueryEngine + if thanosEngine { + queryEngine = engine.New(engine.Opts{ + EngineOpts: opts, + LogicalOptimizers: logicalplan.AllOptimizers, + }) + } else { + queryEngine = promql.NewEngine(opts) + } + // Disable active query tracker to avoid mmap error. 
+ cfg.ActiveQueryTrackerDir = "" - chunkStore, through := makeMockChunkStore(t, chunks, promchunk.PrometheusXorChunk) - distributor := mockDistibutorFor(t, chunkStore.chunks) + chunkStore, through := makeMockChunkStore(t, chunks, enc) + distributor := mockDistibutorFor(t, chunkStore.chunks) - overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) - require.NoError(t, err) + overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) + require.NoError(t, err) - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore)), UseAlwaysQueryable(db)} - queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) - testRangeQuery(t, queryable, queryEngine, through, query) - }) + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))} + queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) + testRangeQuery(t, queryable, queryEngine, through, query, enc) + }) + } } } } @@ -529,47 +609,6 @@ func TestQuerierMetric(t *testing.T) { `), "cortex_max_concurrent_queries")) } -func mockTSDB(t *testing.T, labels []labels.Labels, mint model.Time, samples int, step, chunkOffset time.Duration, samplesPerChunk int) (storage.Queryable, []cortexpb.Sample) { - //parallel testing causes data race - opts := tsdb.DefaultHeadOptions() - opts.ChunkDirRoot = t.TempDir() - // We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test. - head, err := tsdb.NewHead(nil, nil, nil, nil, opts, nil) - require.NoError(t, err) - t.Cleanup(func() { - _ = head.Close() - }) - - app := head.Appender(context.Background()) - rSamples := []cortexpb.Sample{} - - for _, lset := range labels { - cnt := 0 - chunkStartTs := mint - ts := chunkStartTs - for i := 0; i < samples; i++ { - _, err := app.Append(0, lset, int64(ts), float64(ts)) - rSamples = append(rSamples, cortexpb.Sample{TimestampMs: int64(ts), Value: float64(ts)}) - require.NoError(t, err) - cnt++ - - ts = ts.Add(step) - - if cnt%samplesPerChunk == 0 { - // Simulate next chunk, restart timestamp. 
- chunkStartTs = chunkStartTs.Add(chunkOffset) - ts = chunkStartTs - } - } - } - - require.NoError(t, app.Commit()) - - return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { - return tsdb.NewBlockQuerier(head, mint, maxt) - }), rSamples -} - func TestNoHistoricalQueryToIngester(t *testing.T) { t.Parallel() testCases := []struct { @@ -645,7 +684,7 @@ func TestNoHistoricalQueryToIngester(t *testing.T) { require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "0") - queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, nil, log.NewNopLogger()) + queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))}, nil, log.NewNopLogger()) query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) require.NoError(t, err) @@ -739,7 +778,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) { require.NoError(t, err) ctx := user.InjectOrgID(context.Background(), "0") - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))} queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, "dummy", c.queryStartTime, c.queryEndTime, time.Minute) require.NoError(t, err) @@ -833,7 +872,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { chunkStore := &emptyChunkStore{} distributor := &emptyDistributor{} - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))} queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) queryEngine := promql.NewEngine(opts) @@ -853,6 +892,42 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { } } +func TestQuerier_ValidateQueryTimeRange_MaxQueryLength_Series(t *testing.T) { + t.Parallel() + const maxQueryLength = 30 * 24 * time.Hour + + //parallel testing causes data race + var cfg Config + flagext.DefaultValues(&cfg) + // Disable active query tracker to avoid mmap error. + cfg.ActiveQueryTrackerDir = "" + // Ignore max query length check at Querier but it still enforces it for Series. 
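+ // querier.Select skips the max query length check only for non-series requests, so the
+ // limit below is still enforced for /api/v1/series, which is what this test asserts.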
+ cfg.IgnoreMaxQueryLength = true + + limits := DefaultLimitsConfig() + limits.MaxQueryLength = model.Duration(maxQueryLength) + overrides, err := validation.NewOverrides(limits, nil) + require.NoError(t, err) + + chunkStore := &emptyChunkStore{} + distributor := &emptyDistributor{} + + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))} + queryable, _, _ := New(cfg, overrides, distributor, queryables, nil, log.NewNopLogger()) + + ctx := user.InjectOrgID(context.Background(), "test") + now := time.Now() + end := now.Add(-time.Minute) + start := end.Add(-maxQueryLength - time.Hour) + minT := util.TimeToMillis(start) + maxT := util.TimeToMillis(end) + q, err := queryable.Querier(minT, maxT) + require.NoError(t, err) + ss := q.Select(ctx, false, &storage.SelectHints{Func: "series", Start: minT, End: maxT}) + require.False(t, ss.Next()) + require.True(t, strings.Contains(ss.Err().Error(), "the query time range exceeds the limit (query length: 721h0m0s, limit: 720h0m0s)")) +} + func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { t.Parallel() const ( @@ -968,7 +1043,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { require.NoError(t, err) chunkStore := &emptyChunkStore{} - queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))} + queryables := []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))} t.Run("query range", func(t *testing.T) { if testData.query == "" { @@ -1201,7 +1276,7 @@ func mockDistibutorFor(t *testing.T, cks []chunk.Chunk) *MockDistributor { return result } -func testRangeQuery(t testing.TB, queryable storage.Queryable, queryEngine promql.QueryEngine, end model.Time, q query) *promql.Result { +func testRangeQuery(t testing.TB, queryable storage.Queryable, queryEngine promql.QueryEngine, end model.Time, q query, enc promchunk.Encoding) *promql.Result { from, through, step := time.Unix(0, 0), end.Time(), q.step ctx := user.InjectOrgID(context.Background(), "0") query, err := queryEngine.NewRangeQuery(ctx, queryable, nil, q.query, from, through, step) @@ -1214,14 +1289,7 @@ func testRangeQuery(t testing.TB, queryable storage.Queryable, queryEngine promq require.Len(t, m, 1) series := m[0] require.Equal(t, q.labels, series.Metric) - require.Equal(t, q.samples(from, through, step), len(series.Floats)) - var ts int64 - for i, point := range series.Floats { - expectedTime, expectedValue := q.expected(ts) - require.Equal(t, expectedTime, point.T, strconv.Itoa(i)) - require.Equal(t, expectedValue, point.F, strconv.Itoa(i)) - ts += int64(step / time.Millisecond) - } + q.expectedFunc(t, q, end, enc, series) return r } @@ -1319,8 +1387,8 @@ type mockStore interface { } // NewMockStoreQueryable returns the storage.Queryable implementation against the chunks store. 
-func NewMockStoreQueryable(cfg Config, store mockStore) storage.Queryable { - return newMockStoreQueryable(store, getChunksIteratorFunction(cfg)) +func NewMockStoreQueryable(store mockStore) storage.Queryable { + return newMockStoreQueryable(store, getChunksIteratorFunction(Config{})) } func newMockStoreQueryable(store mockStore, chunkIteratorFunc chunkIteratorFunc) storage.Queryable { @@ -1431,7 +1499,7 @@ func TestShortTermQueryToLTS(t *testing.T) { overrides, err := validation.NewOverrides(DefaultLimitsConfig(), nil) require.NoError(t, err) - queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(cfg, chunkStore))}, nil, log.NewNopLogger()) + queryable, _, _ := New(cfg, overrides, distributor, []QueryableWithFilter{UseAlwaysQueryable(NewMockStoreQueryable(chunkStore))}, nil, log.NewNopLogger()) ctx := user.InjectOrgID(context.Background(), "0") query, err := engine.NewRangeQuery(ctx, queryable, nil, "dummy", c.mint, c.maxt, 1*time.Minute) require.NoError(t, err) diff --git a/pkg/querier/tripperware/instantquery/limits.go b/pkg/querier/tripperware/instantquery/limits.go index b92157a5000..d782866148d 100644 --- a/pkg/querier/tripperware/instantquery/limits.go +++ b/pkg/querier/tripperware/instantquery/limits.go @@ -5,13 +5,12 @@ import ( "net/http" "time" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/promql" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -52,10 +51,9 @@ func (l limitsMiddleware) Do(ctx context.Context, r tripperware.Request) (trippe } // Enforce query length across all selectors in the query. 
- min, max := promql.FindMinMaxTime(&parser.EvalStmt{Expr: expr, Start: util.TimeFromMillis(0), End: util.TimeFromMillis(0), LookbackDelta: l.lookbackDelta}) - diff := util.TimeFromMillis(max).Sub(util.TimeFromMillis(min)) - if diff > maxQueryLength { - return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, diff, maxQueryLength) + length := promql.FindNonOverlapQueryLength(expr, 0, 0, l.lookbackDelta) + if length > maxQueryLength { + return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, length, maxQueryLength) } } diff --git a/pkg/querier/tripperware/instantquery/limits_test.go b/pkg/querier/tripperware/instantquery/limits_test.go index 4cce781a1fa..4bf1cfac3c6 100644 --- a/pkg/querier/tripperware/instantquery/limits_test.go +++ b/pkg/querier/tripperware/instantquery/limits_test.go @@ -61,6 +61,10 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { maxQueryLength: thirtyDays, expectedErr: "the query time range exceeds the limit", }, + "shouldn't exceed time range when having multiple selects with offset": { + query: `rate(up[5m]) + rate(up[5m] offset 40d) + rate(up[5m] offset 80d)`, + maxQueryLength: thirtyDays, + }, } for testName, testData := range tests { diff --git a/pkg/querier/tripperware/queryrange/limits.go b/pkg/querier/tripperware/queryrange/limits.go index 49249e8b4d2..a4d991aba8e 100644 --- a/pkg/querier/tripperware/queryrange/limits.go +++ b/pkg/querier/tripperware/queryrange/limits.go @@ -7,13 +7,13 @@ import ( "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/querier/tripperware" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/promql" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -88,10 +88,9 @@ func (l limitsMiddleware) Do(ctx context.Context, r tripperware.Request) (trippe } // Enforce query length across all selectors in the query. 
- min, max := promql.FindMinMaxTime(&parser.EvalStmt{Expr: expr, Start: util.TimeFromMillis(0), End: util.TimeFromMillis(0), LookbackDelta: l.lookbackDelta}) - diff := util.TimeFromMillis(max).Sub(util.TimeFromMillis(min)) - if diff > maxQueryLength { - return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, diff, maxQueryLength) + length := promql.FindNonOverlapQueryLength(expr, 0, 0, l.lookbackDelta) + if length > maxQueryLength { + return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, length, maxQueryLength) } } diff --git a/pkg/querier/tripperware/queryrange/limits_test.go b/pkg/querier/tripperware/queryrange/limits_test.go index 25a28a6fb0a..5ee78fbf0bb 100644 --- a/pkg/querier/tripperware/queryrange/limits_test.go +++ b/pkg/querier/tripperware/queryrange/limits_test.go @@ -181,6 +181,12 @@ func TestLimitsMiddleware_MaxQueryLength(t *testing.T) { reqEndTime: now.Add(-2 * thirtyDays), expectedErr: "the query time range exceeds the limit", }, + "shouldn't exceed time range when having multiple selects with offset": { + query: `rate(up[5m]) + rate(up[5m] offset 40d) + rate(up[5m] offset 80d)`, + maxQueryLength: thirtyDays, + reqStartTime: now.Add(-time.Hour), + reqEndTime: now, + }, } for testName, testData := range tests { diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 4a02540ef63..b9b4ba2b356 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -25,8 +25,8 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" + promql_util "github.com/cortexproject/cortex/pkg/util/promql" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -43,12 +43,34 @@ type PusherAppender struct { pusher Pusher labels []labels.Labels samples []cortexpb.Sample + histogramLabels []labels.Labels + histograms []cortexpb.Histogram userID string evaluationDelay time.Duration } -func (a *PusherAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { - return 0, errors.New("querying native histograms is not supported") +func (a *PusherAppender) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if h == nil && fh == nil { + return 0, errors.New("no histogram") + } + + if h != nil { + // A histogram sample is considered stale if its sum is set to NaN. + // https://github.com/prometheus/prometheus/blob/b6ef745016fa9472fdd0ae20f75a9682e01d1e5c/tsdb/head_append.go#L339-L346 + if a.evaluationDelay > 0 && (value.IsStaleNaN(h.Sum)) { + t -= a.evaluationDelay.Milliseconds() + } + a.histograms = append(a.histograms, cortexpb.HistogramToHistogramProto(t, h)) + } else { + // A histogram sample is considered stale if its sum is set to NaN. 
+ // https://github.com/prometheus/prometheus/blob/b6ef745016fa9472fdd0ae20f75a9682e01d1e5c/tsdb/head_append.go#L339-L346 + if a.evaluationDelay > 0 && (value.IsStaleNaN(fh.Sum)) { + t -= a.evaluationDelay.Milliseconds() + } + a.histograms = append(a.histograms, cortexpb.FloatHistogramToHistogramProto(t, fh)) + } + a.histogramLabels = append(a.histogramLabels, l) + return 0, nil } func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { @@ -85,10 +107,11 @@ func (a *PusherAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ func (a *PusherAppender) Commit() error { a.totalWrites.Inc() + req := cortexpb.ToWriteRequest(a.labels, a.samples, nil, nil, cortexpb.RULE) + req.AddHistogramTimeSeries(a.histogramLabels, a.histograms) // Since a.pusher is distributor, client.ReuseSlice will be called in a.pusher.Push. // We shouldn't call client.ReuseSlice here. - _, err := a.pusher.Push(user.InjectOrgID(a.ctx, a.userID), cortexpb.ToWriteRequest(a.labels, a.samples, nil, nil, cortexpb.RULE)) - + _, err := a.pusher.Push(user.InjectOrgID(a.ctx, a.userID), req) if err != nil { // Don't report errors that ended with 4xx HTTP status code (series limits, duplicate samples, out of order, etc.) if resp, ok := httpgrpc.HTTPResponseFromError(err); !ok || resp.Code/100 != 4 { @@ -98,6 +121,8 @@ func (a *PusherAppender) Commit() error { a.labels = nil a.samples = nil + a.histogramLabels = nil + a.histograms = nil return err } @@ -108,6 +133,8 @@ func (a *PusherAppender) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ func (a *PusherAppender) Rollback() error { a.labels = nil a.samples = nil + a.histogramLabels = nil + a.histograms = nil return nil } @@ -167,10 +194,9 @@ func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable, overrides R // Fail the query in the engine. if err == nil { // Enforce query length across all selectors in the query. 
- min, max := promql.FindMinMaxTime(&parser.EvalStmt{Expr: expr, Start: util.TimeFromMillis(0), End: util.TimeFromMillis(0), LookbackDelta: lookbackDelta}) - diff := util.TimeFromMillis(max).Sub(util.TimeFromMillis(min)) - if diff > maxQueryLength { - return nil, validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, diff, maxQueryLength)) + length := promql_util.FindNonOverlapQueryLength(expr, 0, 0, lookbackDelta) + if length > maxQueryLength { + return nil, validation.LimitError(fmt.Sprintf(validation.ErrQueryTooLong, length, maxQueryLength)) } } } diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index 71db759d64b..8648ddd5f33 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -13,6 +13,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" @@ -20,6 +22,7 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" + histogram_util "github.com/cortexproject/cortex/pkg/util/histogram" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -38,63 +41,357 @@ func TestPusherAppendable(t *testing.T) { pusher := &fakePusher{} pa := NewPusherAppendable(pusher, "user-1", nil, prometheus.NewCounter(prometheus.CounterOpts{}), prometheus.NewCounter(prometheus.CounterOpts{})) + lbls1 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "foo_bar"})) + lbls2 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "ALERTS", labels.AlertName: "boop"})) + lbls3 := cortexpb.FromLabelsToLabelAdapters(labels.FromMap(map[string]string{labels.MetricName: "ALERTS_FOR_STATE", labels.AlertName: "boop"})) + + testHistogram := histogram_util.GenerateTestHistogram(1) + testFloatHistogram := histogram_util.GenerateTestFloatHistogram(2) + testHistogramWithNaN := histogram_util.GenerateTestHistogram(1) + testFloatHistogramWithNaN := histogram_util.GenerateTestFloatHistogram(1) + testHistogramWithNaN.Sum = math.Float64frombits(value.StaleNaN) + testFloatHistogramWithNaN.Sum = math.Float64frombits(value.StaleNaN) + for _, tc := range []struct { - name string - series string - evalDelay time.Duration - value float64 - expectedTS int64 + name string + series string + evalDelay time.Duration + value float64 + histogram *histogram.Histogram + floatHistogram *histogram.FloatHistogram + expectedReq *cortexpb.WriteRequest }{ { - name: "tenant without delay, normal value", - series: "foo_bar", - value: 1.234, - expectedTS: 120_000, + name: "tenant without delay, normal value", + series: "foo_bar", + value: 1.234, + expectedReq: &cortexpb.WriteRequest{ + Timeseries: []cortexpb.PreallocTimeseries{ + { + TimeSeries: &cortexpb.TimeSeries{ + Labels: lbls1, + Samples: []cortexpb.Sample{ + {Value: 1.234, TimestampMs: 120_000}, + }, + }, + }, + }, + Source: cortexpb.RULE, + }, + }, + { + name: "tenant without delay, stale nan value", + series: "foo_bar", + value: math.Float64frombits(value.StaleNaN), + expectedReq: &cortexpb.WriteRequest{ + Timeseries: []cortexpb.PreallocTimeseries{ + { + TimeSeries: &cortexpb.TimeSeries{ + Labels: lbls1, + Samples: []cortexpb.Sample{ + {Value: math.Float64frombits(value.StaleNaN), TimestampMs: 120_000}, + }, + 
},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:      "tenant with delay, normal value",
+		series:    "foo_bar",
+		value:     1.234,
+		evalDelay: time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: 1.234, TimestampMs: 120_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:      "tenant with delay, stale nan value",
+		series:    "foo_bar",
+		value:     math.Float64frombits(value.StaleNaN),
+		evalDelay: time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:   "ALERTS without delay, normal value",
+		series: `ALERTS{alertname="boop"}`,
+		value:  1.234,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls2,
+						Samples: []cortexpb.Sample{
+							{Value: 1.234, TimestampMs: 120_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:   "ALERTS without delay, stale nan value",
+		series: `ALERTS{alertname="boop"}`,
+		value:  math.Float64frombits(value.StaleNaN),
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls2,
+						Samples: []cortexpb.Sample{
+							{Value: math.Float64frombits(value.StaleNaN), TimestampMs: 120_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:      "ALERTS with delay, normal value",
+		series:    `ALERTS{alertname="boop"}`,
+		value:     1.234,
+		evalDelay: time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls2,
+						Samples: []cortexpb.Sample{
+							{Value: 1.234, TimestampMs: 60_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:      "ALERTS_FOR_STATE with delay, stale nan value",
+		series:    `ALERTS_FOR_STATE{alertname="boop"}`,
+		value:     math.Float64frombits(value.StaleNaN),
+		evalDelay: time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls3,
+						Samples: []cortexpb.Sample{
+							{Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000},
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
 		},
 	},
 	{
-		name:       "tenant without delay, stale nan value",
-		series:     "foo_bar",
-		value:      math.Float64frombits(value.StaleNaN),
-		expectedTS: 120_000,
+		name:      "tenant without delay, normal histogram",
+		series:    "foo_bar",
+		histogram: testHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.HistogramToHistogramProto(120_000, testHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "tenant with delay, normal value",
-		series:     "foo_bar",
-		value:      1.234,
-		expectedTS: 120_000,
-		evalDelay:  time.Minute,
+		name:           "tenant without delay, float histogram",
+		series:         "foo_bar",
+		floatHistogram: testFloatHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.FloatHistogramToHistogramProto(120_000, testFloatHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "tenant with delay, stale nan value",
-		value:      math.Float64frombits(value.StaleNaN),
-		expectedTS: 60_000,
-		evalDelay:  time.Minute,
+		name:      "tenant without delay, both sample and histogram",
+		series:    "foo_bar",
+		value:     1.234,
+		histogram: testHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: 1.234, TimestampMs: 120_000},
+						},
+					},
+				},
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.HistogramToHistogramProto(120_000, testHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "ALERTS without delay, normal value",
-		series:     `ALERTS{alertname="boop"}`,
-		value:      1.234,
-		expectedTS: 120_000,
+		name:           "tenant without delay, both sample and float histogram",
+		series:         "foo_bar",
+		value:          1.234,
+		floatHistogram: testFloatHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: 1.234, TimestampMs: 120_000},
+						},
+					},
+				},
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.FloatHistogramToHistogramProto(120_000, testFloatHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "ALERTS without delay, stale nan value",
-		series:     `ALERTS{alertname="boop"}`,
-		value:      math.Float64frombits(value.StaleNaN),
-		expectedTS: 120_000,
+		name:      "tenant with delay and NaN sample, normal histogram",
+		series:    "foo_bar",
+		value:     math.Float64frombits(value.StaleNaN),
+		evalDelay: time.Minute,
+		histogram: testHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000},
+						},
+					},
+				},
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.HistogramToHistogramProto(120_000, testHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "ALERTS with delay, normal value",
-		series:     `ALERTS{alertname="boop"}`,
-		value:      1.234,
-		expectedTS: 60_000,
-		evalDelay:  time.Minute,
+		name:           "tenant with delay and NaN sample, float histogram",
+		series:         "foo_bar",
+		value:          math.Float64frombits(value.StaleNaN),
+		evalDelay:      time.Minute,
+		floatHistogram: testFloatHistogram,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Samples: []cortexpb.Sample{
+							{Value: math.Float64frombits(value.StaleNaN), TimestampMs: 60_000},
+						},
+					},
+				},
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.FloatHistogramToHistogramProto(120_000, testFloatHistogram),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 	{
-		name:       "ALERTS with delay, stale nan value",
-		series:     `ALERTS_FOR_STATE{alertname="boop"}`,
-		value:      math.Float64frombits(value.StaleNaN),
-		expectedTS: 60_000,
-		evalDelay:  time.Minute,
+		name:      "tenant with delay, NaN histogram",
+		series:    "foo_bar",
+		histogram: testHistogramWithNaN,
+		evalDelay: time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.HistogramToHistogramProto(60_000, testHistogramWithNaN),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
+	},
+	{
+		name:           "tenant with delay, NaN float histogram",
+		series:         "foo_bar",
+		floatHistogram: testFloatHistogramWithNaN,
+		evalDelay:      time.Minute,
+		expectedReq: &cortexpb.WriteRequest{
+			Timeseries: []cortexpb.PreallocTimeseries{
+				{
+					TimeSeries: &cortexpb.TimeSeries{
+						Labels: lbls1,
+						Histograms: []cortexpb.Histogram{
+							cortexpb.FloatHistogramToHistogramProto(60_000, testFloatHistogramWithNaN),
+						},
+					},
+				},
+			},
+			Source: cortexpb.RULE,
+		},
 	},
 } {
 	t.Run(tc.name, func(t *testing.T) {
@@ -108,13 +405,21 @@ func TestPusherAppendable(t *testing.T) {
 		pusher.response = &cortexpb.WriteResponse{}
 		a := pa.Appender(ctx)
 
-		_, err = a.Append(0, lbls, 120_000, tc.value)
+		// We don't ingest a sample when the value is 0, so test cases can exercise histogram-only requests.
+		if tc.value != 0 {
+			_, err = a.Append(0, lbls, 120_000, tc.value)
+			require.NoError(t, err)
+		}
+
+		if tc.histogram != nil {
+			_, err = a.AppendHistogram(0, lbls, 120_000, tc.histogram, nil)
+		} else if tc.floatHistogram != nil {
+			_, err = a.AppendHistogram(0, lbls, 120_000, nil, tc.floatHistogram)
+		}
 		require.NoError(t, err)
 
 		require.NoError(t, a.Commit())
-
-		require.Equal(t, tc.expectedTS, pusher.request.Timeseries[0].Samples[0].TimestampMs)
-
+		require.Equal(t, tc.expectedReq.String(), pusher.request.String())
 	})
 }
}
@@ -165,6 +470,11 @@ func TestPusherErrors(t *testing.T) {
 	_, err = a.Append(0, lbls, int64(model.Now()), 123456)
 	require.NoError(t, err)
 
+	_, err = a.AppendHistogram(0, lbls, int64(model.Now()), histogram_util.GenerateTestHistogram(1), nil)
+	require.NoError(t, err)
+	_, err = a.AppendHistogram(0, lbls, int64(model.Now()), nil, histogram_util.GenerateTestFloatHistogram(2))
+	require.NoError(t, err)
+
 	require.Equal(t, tc.returnedError, a.Commit())
 
 	require.Equal(t, tc.expectedWrites, int(testutil.ToFloat64(writes)))
diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go
index 4c399a86033..e4de5b6b967 100644
--- a/pkg/util/http_test.go
+++ b/pkg/util/http_test.go
@@ -153,7 +153,8 @@ func TestParseProtoReader(t *testing.T) {
 					{Value: 20, TimestampMs: 2},
 					{Value: 30, TimestampMs: 3},
 				},
-				Exemplars: []cortexpb.Exemplar{},
+				Exemplars:  []cortexpb.Exemplar{},
+				Histograms: []cortexpb.Histogram{},
 			},
 		},
 	},
diff --git a/pkg/util/promql/promql.go b/pkg/util/promql/promql.go
new file mode 100644
index 00000000000..a0d3f81883a
--- /dev/null
+++ b/pkg/util/promql/promql.go
@@ -0,0 +1,128 @@
+package promql
+
+import (
+	"math"
+	"sort"
+	"time"
+
+	"github.com/prometheus/prometheus/promql/parser"
+
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// FindNonOverlapQueryLength iterates through all the vector selectors in the statement and finds the time interval
+// each selector will try to process. It merges intervals to be non-overlapping and calculates the total duration as
+// the query length. This takes into account offsets, @ modifiers, and range selectors.
+// If the statement does not select series, then duration 0 will be returned.
+func FindNonOverlapQueryLength(expr parser.Expr, start, end int64, lookbackDelta time.Duration) time.Duration {
+	type minMaxTime struct {
+		minTime, maxTime int64
+	}
+	intervals := make([]minMaxTime, 0)
+
+	// Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range.
+	// The evaluation of the VectorSelector inside then evaluates the given range and unsets
+	// the variable.
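+	// Illustrative walkthrough (added for clarity, not part of the upstream change):
+	// for `up[1h] offset 7d` evaluated at instant T, evalRange is 1h, so the selector's
+	// interval is [T-7d-1h, T-7d] and it contributes 1h to the query length; a bare
+	// `up` contributes [T-lookbackDelta, T] instead.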
+	var evalRange time.Duration
+	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+		switch n := node.(type) {
+		case *parser.VectorSelector:
+			start, end := getTimeRangesForSelector(start, end, durationMilliseconds(lookbackDelta), n, path, evalRange)
+			intervals = append(intervals, minMaxTime{start, end})
+			evalRange = 0
+		case *parser.MatrixSelector:
+			evalRange = n.Range
+		}
+		return nil
+	})
+
+	if len(intervals) == 0 {
+		return 0
+	}
+
+	sort.Slice(intervals, func(i, j int) bool {
+		return intervals[i].minTime < intervals[j].minTime
+	})
+
+	prev := intervals[0]
+	length := time.Duration(0)
+	for i := 1; i < len(intervals); i++ {
+		if intervals[i].minTime <= prev.maxTime {
+			prev.maxTime = max(prev.maxTime, intervals[i].maxTime)
+		} else {
+			length += util.TimeFromMillis(prev.maxTime).Sub(util.TimeFromMillis(prev.minTime))
+			prev = intervals[i]
+		}
+	}
+	length += util.TimeFromMillis(prev.maxTime).Sub(util.TimeFromMillis(prev.minTime))
+	return length
+}
+
+// Copied from https://github.com/prometheus/prometheus/blob/v2.52.0/promql/engine.go#L863.
+func getTimeRangesForSelector(start, end, lookbackDelta int64, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) {
+	subqOffset, subqRange, subqTs := subqueryTimes(path)
+
+	if subqTs != nil {
+		// The timestamp on the subquery overrides the eval statement time ranges.
+		start = *subqTs
+		end = *subqTs
+	}
+
+	if n.Timestamp != nil {
+		// The timestamp on the selector overrides everything.
+		start = *n.Timestamp
+		end = *n.Timestamp
+	} else {
+		offsetMilliseconds := durationMilliseconds(subqOffset)
+		start = start - offsetMilliseconds - durationMilliseconds(subqRange)
+		end -= offsetMilliseconds
+	}
+
+	if evalRange == 0 {
+		start -= lookbackDelta
+	} else {
+		// For all matrix queries we want to ensure that we have (end-start) + range selected
+		// this way we have `range` data before the start time
+		start -= durationMilliseconds(evalRange)
+	}
+
+	offsetMilliseconds := durationMilliseconds(n.OriginalOffset)
+	start -= offsetMilliseconds
+	end -= offsetMilliseconds
+
+	return start, end
+}
+
+// subqueryTimes returns the sum of offsets and ranges of all subqueries in the path.
+// If the @ modifier is used, then the offset and range is w.r.t. that timestamp
+// (i.e. the sum is reset when we have @ modifier).
+// The returned *int64 is the closest timestamp that was seen. nil for no @ modifier.
+// Copied from https://github.com/prometheus/prometheus/blob/v2.52.0/promql/engine.go#L803.
+func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) {
+	var (
+		subqOffset, subqRange time.Duration
+		ts                    int64 = math.MaxInt64
+	)
+	for _, node := range path {
+		if n, ok := node.(*parser.SubqueryExpr); ok {
+			subqOffset += n.OriginalOffset
+			subqRange += n.Range
+			if n.Timestamp != nil {
+				// The @ modifier on subquery invalidates all the offset and
+				// range till now. Hence resetting it here.
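+				// Hypothetical example, not in the upstream source: for
+				// `max_over_time(rate(x[5m])[30m:1m] @ 1000)`, any offset and range
+				// accumulated from enclosing subqueries is discarded; accumulation
+				// restarts from this @-anchored subquery's own offset and 30m range.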
+				subqOffset = n.OriginalOffset
+				subqRange = n.Range
+				ts = *n.Timestamp
+			}
+		}
+	}
+	var tsp *int64
+	if ts != math.MaxInt64 {
+		tsp = &ts
+	}
+	return subqOffset, subqRange, tsp
+}
+
+func durationMilliseconds(d time.Duration) int64 {
+	return int64(d / (time.Millisecond / time.Nanosecond))
+}
diff --git a/pkg/util/promql/promql_test.go b/pkg/util/promql/promql_test.go
new file mode 100644
index 00000000000..ed35b89c2e3
--- /dev/null
+++ b/pkg/util/promql/promql_test.go
@@ -0,0 +1,87 @@
+package promql
+
+import (
+	"testing"
+	"time"
+
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFindNonOverlapQueryLength(t *testing.T) {
+	for _, tc := range []struct {
+		name           string
+		query          string
+		expectedLength time.Duration
+	}{
+		{
+			name:  "number literal, no select",
+			query: `1`,
+		},
+		{
+			name:  "string literal, no select",
+			query: `"test"`,
+		},
+		{
+			name:  "function, no select",
+			query: `time()`,
+		},
+		{
+			name:           "single vector selector",
+			query:          `up`,
+			expectedLength: time.Minute * 5,
+		},
+		{
+			name:           "single matrix selector",
+			query:          `up[1h]`,
+			expectedLength: time.Hour,
+		},
+		{
+			name:           "sum rate",
+			query:          `sum(rate(up[1h]))`,
+			expectedLength: time.Hour,
+		},
+		{
+			name:           "single vector selector with offset",
+			query:          `up offset 7d`,
+			expectedLength: time.Minute * 5,
+		},
+		{
+			name:           "single matrix selector with offset",
+			query:          `up[1h] offset 7d`,
+			expectedLength: time.Hour,
+		},
+		{
+			name:           "multiple vector selectors, dedup time range",
+			query:          `sum(up) + sum(up) + sum(up)`,
+			expectedLength: time.Minute * 5,
+		},
+		{
+			name:           "multiple matrix selectors, dedup time range",
+			query:          `sum_over_time(up[1h]) + sum_over_time(up[1h]) + sum_over_time(up[1h])`,
+			expectedLength: time.Hour,
+		},
+		{
+			name:           "multiple vector selectors with offsets",
+			query:          `sum(up) + sum(up offset 1h) + sum(up offset 2h)`,
+			expectedLength: time.Minute * 15,
+		},
+		{
+			name:           "multiple matrix selectors with offsets",
+			query:          `sum_over_time(up[1h]) + sum_over_time(up[1h] offset 1d) + sum_over_time(up[1h] offset 2d)`,
+			expectedLength: time.Hour * 3,
+		},
+		{
+			name:           "multiple sum rate with offsets",
+			query:          `sum(rate(up[5m])) + sum(rate(up[5m] offset 1w)) + sum(rate(up[5m] offset 2w)) + sum(rate(up[5m] offset 3w)) + sum(rate(up[5m] offset 4w))`,
+			expectedLength: time.Minute * 5 * 5,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			expr, err := parser.ParseExpr(tc.query)
+			require.NoError(t, err)
+			duration := FindNonOverlapQueryLength(expr, 0, 0, time.Minute*5)
+			require.Equal(t, tc.expectedLength, duration)
+		})
+	}
+}
diff --git a/pkg/util/test_util.go b/pkg/util/test_util.go
index e196785bb56..aebf7406277 100644
--- a/pkg/util/test_util.go
+++ b/pkg/util/test_util.go
@@ -30,9 +30,16 @@ func GenerateRandomStrings() []string {
 	return randomStrings
 }
 
-func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, points int, enc promchunk.Encoding) chunk.Chunk {
-	metric := labels.Labels{
-		{Name: model.MetricNameLabel, Value: "foo"},
+func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, points int, enc promchunk.Encoding, additionalLabels ...labels.Label) chunk.Chunk {
+	var hasMetricName bool
+	for _, lbl := range additionalLabels {
+		if lbl.Name == model.MetricNameLabel {
+			hasMetricName = true
+		}
+	}
+	metric := labels.NewBuilder(labels.New(additionalLabels...))
+	if !hasMetricName {
+		metric = metric.Set(model.MetricNameLabel, "foo")
 	}
 	pe := enc.PromChunkEncoding()
 	pc, err := chunkenc.NewEmptyChunk(pe)
@@ -64,5 +71,5 @@ func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, poin
 	}
 	ts = ts.Add(-step) // undo the add that we did just before exiting the loop
 
-	return chunk.NewChunk(metric, pc, from, ts)
+	return chunk.NewChunk(metric.Labels(), pc, from, ts)
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
new file mode 100644
index 00000000000..8cdb9103650
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
@@ -0,0 +1,4 @@
+# live test artifacts
+Dockerfile
+k8s.yaml
+sshkey*
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index f6749c03059..6d4b6feb86e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,35 @@
 # Release History
 
+## 1.6.0 (2024-06-10)
+
+### Features Added
+* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential
+  that authenticates with client assertions such as federated credentials
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.6.0-beta.4
+* Removed `AzurePipelinesCredential` and the persistent token caching API.
+  They will return in v1.7.0-beta.1
+
+### Bugs Fixed
+* Managed identity bug fixes
+
+## 1.6.0-beta.4 (2024-05-14)
+
+### Features Added
+* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with
+  workload identity federation
+
+## 1.6.0-beta.3 (2024-04-09)
+
+### Breaking Changes
+* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity
+  environments to avoid excessive retry delays when the IMDS endpoint is not available. This
+  should improve credential chain resolution for local development scenarios.
+
+### Bugs Fixed
+* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances
+
 ## 1.5.2 (2024-04-09)
 
 ### Bugs Fixed
@@ -9,6 +39,28 @@
 * Restored v1.4.0 error behavior for empty tenant IDs
 * Upgraded dependencies
 
+## 1.6.0-beta.2 (2024-02-06)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.6.0-beta.1
+* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct
+  type that carries the `TokenRequestOptions` passed to the `GetToken` call which
+  returned the error.
+
+### Bugs Fixed
+* Fixed more cases in which credential chains like `DefaultAzureCredential`
+  should try their next credential after attempting managed identity
+  authentication in a Docker Desktop container
+
+### Other Changes
+* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration
+
+## 1.6.0-beta.1 (2024-01-17)
+
+### Features Added
+* Restored persistent token caching API first added in v1.5.0-beta.1
+* Added `AzureCLICredentialOptions.Subscription`
+
 ## 1.5.1 (2024-01-17)
 
 ### Bugs Fixed
@@ -135,7 +187,7 @@
 ### Features Added
 
 * By default, credentials set client capability "CP1" to enable support for
-  [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation).
+  [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation).
   This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
 * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
index 1a649202303..4404be82449 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -1,6 +1,6 @@
 # Migrating from autorest/adal to azidentity
 
-`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
+`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
 
 This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
 
@@ -284,7 +284,7 @@ if err == nil {
 }
 ```
 
-Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview).
+Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview).
 
 ## Use azidentity credentials with older packages
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index b6ad2d39f84..b5acff0e632 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -1,9 +1,9 @@
 # Azure Identity Client Module for Go
 
-The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
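For orientation, here is a minimal sketch of the pattern the README above describes, assuming only this module's documented exports (`NewDefaultAzureCredential`, `GetToken`); the scope string is an arbitrary example:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential tries environment, workload identity, managed
	// identity, and Azure CLI credentials in order.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Any azcore-based client accepts a TokenCredential; calling GetToken
	// directly is a simple way to verify that authentication works.
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires:", tk.ExpiresOn)
}
```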
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
-| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/)
+| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/)
 | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
 
 # Getting started
@@ -30,7 +30,7 @@ When debugging and executing code locally, developers typically use their own ac
 #### Authenticating via the Azure CLI
 
 `DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user
-signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
+signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
 
 When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`.
 
@@ -69,14 +69,14 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.
 ## Managed Identity
 
 `DefaultAzureCredential` and `ManagedIdentityCredential` support
-[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview)
+[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview)
 in any hosting environment which supports managed identities, such as (this list is not exhaustive):
 
-* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity)
-* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
-* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization)
-* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity)
-* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)
-* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
+* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity)
+* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
+* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization)
+* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity)
+* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)
+* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token)
 
 ## Examples
 
@@ -207,7 +207,7 @@ For more details, see the [token caching documentation](https://aka.ms/azsdk/go/
 Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).
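A minimal sketch of handling these failures, assuming the module's exported `AuthenticationFailedError` type; the tenant, client, and secret values are placeholders:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Placeholder IDs; substitute real values.
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
	if err != nil {
		log.Fatal(err)
	}
	_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	var authFailed *azidentity.AuthenticationFailedError
	if errors.As(err, &authFailed) {
		// The request reached Microsoft Entra ID and was rejected; the message
		// usually carries an AADSTS code covered by the tables below.
		log.Printf("authentication failed: %v", authFailed)
	}
}
```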
-For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes).
+For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes).
 
 ### Logging
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
index c0d6601469c..f9cc4894339 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -45,7 +45,7 @@ With persistent disk token caching enabled, the library first determines if a va
 
 #### Example code
 
-See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data.
+See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
 
 ### Credentials supporting token caching
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 832c599eb90..3564e685e18 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -58,7 +58,7 @@ This error contains several pieces of information:
 
 - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`.
 
-- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
+- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
 
 - __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures.
 
@@ -97,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication)
 
 | Error Code | Issue | Mitigation |
 |---|---|---|
-|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
 
 ## Troubleshoot ClientCertificateCredential authentication issues
 
 | Error Code | Description | Mitigation |
 |---|---|---|
-|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
 
 ## Troubleshoot UsernamePasswordCredential authentication issues
 
@@ -123,20 +123,20 @@ azlog.SetEvents(azidentity.EventAuthentication)
 
 |Host Environment| | |
 |---|---|---|
-|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
-|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
+|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
+|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
 |Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
-|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
-|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)||
+|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
+|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
 
 ### Azure Virtual Machine managed identity
 
 | Error Message |Description| Mitigation |
 |---|---|---|
-|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.<br><br>If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.<br><br>If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
 |The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
-|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
-|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|• Refer to the error message for more details on specific failures.<br>• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
+|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
+|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|• Refer to the error message for more details on specific failures.<br>• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
 
 #### Verify IMDS is available on the VM
 
@@ -152,7 +152,7 @@ curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://man
 
 | Error Message |Description| Mitigation |
 |---|---|---|
-|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|• Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).<br>• Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.|
+|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|• Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).<br>• Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.|
 
 #### Verify the App Service managed identity endpoint is available
 
@@ -177,8 +177,8 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
 
 | Error Message |Description| Mitigation |
 |---|---|---|
-|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|• Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).<br>• Validate the installation location is in the application's `PATH` environment variable.|
-|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|• Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).<br>• Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.|
+|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|• Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).<br>• Validate the installation location is in the application's `PATH` environment variable.|
+|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|• Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).<br>• Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.|
 
 #### Verify the Azure CLI can obtain tokens
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 1be55a4bdd3..bff0c44dac4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "go",
   "TagPrefix": "go/azidentity",
-  "Tag": "go/azidentity_98074050dc"
+  "Tag": "go/azidentity_087379b475"
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index 43577ab3c5f..b9976f5fede 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -35,9 +35,9 @@ type AzureCLICredentialOptions struct {
 	// logged in account can access.
 	AdditionallyAllowedTenants []string
 
-	// subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
+	// Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
 	// than the Azure CLI's current account.
-	subscription string
+	Subscription string
 
 	// TenantID identifies the tenant the credential should authenticate in.
 	// Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user.
@@ -68,9 +68,9 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
 	if options != nil {
 		cp = *options
 	}
-	for _, r := range cp.subscription {
+	for _, r := range cp.Subscription {
 		if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') {
-			return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription)
+			return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription)
 		}
 	}
 	if cp.TenantID != "" && !validTenantID(cp.TenantID) {
@@ -97,7 +97,7 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ
 	}
 	c.mu.Lock()
 	defer c.mu.Unlock()
-	b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription)
+	b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription)
 	if err == nil {
 		at, err = c.createAccessToken(b)
 	}
@@ -163,26 +163,21 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []
 
 func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
 	t := struct {
-		AccessToken      string `json:"accessToken"`
-		Authority        string `json:"_authority"`
-		ClientID         string `json:"_clientId"`
-		ExpiresOn        string `json:"expiresOn"`
-		IdentityProvider string `json:"identityProvider"`
-		IsMRRT           bool   `json:"isMRRT"`
-		RefreshToken     string `json:"refreshToken"`
-		Resource         string `json:"resource"`
-		TokenType        string `json:"tokenType"`
-		UserID           string `json:"userId"`
+		AccessToken string `json:"accessToken"`
+		Expires_On  int64  `json:"expires_on"`
+		ExpiresOn   string `json:"expiresOn"`
 	}{}
 	err := json.Unmarshal(tk, &t)
 	if err != nil {
 		return azcore.AccessToken{}, err
 	}
-	// the Azure CLI's "expiresOn" is local time
-	exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local)
-	if err != nil {
-		return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err)
+	exp := time.Unix(t.Expires_On, 0)
+	if t.Expires_On == 0 {
+		exp, err = time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local)
+		if err != nil {
+			return azcore.AccessToken{}, fmt.Errorf("%s: error parsing token expiration time %q: %v", credNameAzureCLI, t.ExpiresOn, err)
+		}
 	}
 
 	converted := azcore.AccessToken{
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
new file mode 100644
index 00000000000..2655543aee6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
@@ -0,0 +1,130 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+const (
+	credNameAzurePipelines = "AzurePipelinesCredential"
+	oidcAPIVersion         = "7.1"
+	systemAccessToken      = "SYSTEM_ACCESSTOKEN"
+	systemOIDCRequestURI   = "SYSTEM_OIDCREQUESTURI"
+)
+
+// azurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See
+// [Azure Pipelines documentation] for more information.
+//
+// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation
+type azurePipelinesCredential struct {
+	connectionID, oidcURI, systemAccessToken string
+	cred                                     *ClientAssertionCredential
+}
+
+// azurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential.
+type azurePipelinesCredentialOptions struct {
+	azcore.ClientOptions
+
+	// AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
+	// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
+	// application is registered.
+	AdditionallyAllowedTenants []string
+
+	// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
+	// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
+	// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
+	// the application responsible for ensuring the configured authority is valid and trustworthy.
+	DisableInstanceDiscovery bool
+}
+
+// newAzurePipelinesCredential is the constructor for AzurePipelinesCredential. In addition to its required arguments,
+// it reads a security token for the running build, which is required to authenticate the service connection, from the
+// environment variable SYSTEM_ACCESSTOKEN. See the [Azure Pipelines documentation] for an example showing how to set
+// this variable in build job YAML.
+//
+// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken
+func newAzurePipelinesCredential(tenantID, clientID, serviceConnectionID string, options *azurePipelinesCredentialOptions) (*azurePipelinesCredential, error) {
+	if options == nil {
+		options = &azurePipelinesCredentialOptions{}
+	}
+	u := os.Getenv(systemOIDCRequestURI)
+	if u == "" {
+		return nil, fmt.Errorf("no value for environment variable %s. This should be set by Azure Pipelines", systemOIDCRequestURI)
+	}
+	sat := os.Getenv(systemAccessToken)
+	if sat == "" {
+		return nil, errors.New("no value for environment variable " + systemAccessToken)
+	}
+	a := azurePipelinesCredential{
+		connectionID:      serviceConnectionID,
+		oidcURI:           u,
+		systemAccessToken: sat,
+	}
+	caco := ClientAssertionCredentialOptions{
+		AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+		ClientOptions:              options.ClientOptions,
+		DisableInstanceDiscovery:   options.DisableInstanceDiscovery,
+	}
+	cred, err := NewClientAssertionCredential(tenantID, clientID, a.getAssertion, &caco)
+	if err != nil {
+		return nil, err
+	}
+	cred.client.name = credNameAzurePipelines
+	a.cred = cred
+	return &a, nil
+}
+
+// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically.
+func (a *azurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+	var err error
+	ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	tk, err := a.cred.GetToken(ctx, opts)
+	return tk, err
+}
+
+func (a *azurePipelinesCredential) getAssertion(ctx context.Context) (string, error) {
+	url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID
+	url, err := runtime.EncodeQueryParams(url)
+	if err != nil {
+		return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil)
+	}
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
+	if err != nil {
+		return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil)
+	}
+	req.Header.Set("Authorization", "Bearer "+a.systemAccessToken)
+	res, err := doForClient(a.cred.client.azClient, req)
+	if err != nil {
+		return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil)
+	}
+	if res.StatusCode != http.StatusOK {
+		msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration"
+		// include the response because its body, if any, probably contains an error message.
+		// OK responses aren't included with errors because they probably contain secrets
+		return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil)
+	}
+	b, err := runtime.Payload(res)
+	if err != nil {
+		return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil)
+	}
+	var r struct {
+		OIDCToken string `json:"oidcToken"`
+	}
+	err = json.Unmarshal(b, &r)
+	if err != nil {
+		return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil)
+	}
+	return r.OIDCToken, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
index dc855edf786..6c35a941b97 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
@@ -86,7 +86,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
 		errs                 []error
 		successfulCredential azcore.TokenCredential
 		token                azcore.AccessToken
-		unavailableErr       *credentialUnavailableError
+		unavailableErr       credentialUnavailable
 	)
 	for _, cred := range c.sources {
 		token, err = cred.GetToken(ctx, opts)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
index d077682c5c2..4cd8c514473 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -8,7 +8,7 @@ trigger:
       - release/*
   paths:
     include:
-      - sdk/azidentity/
+      - sdk/azidentity/
 
 pr:
   branches:
@@ -19,17 +19,28 @@ pr:
       - release/*
   paths:
     include:
-      - sdk/azidentity/
+      - sdk/azidentity/
 
-stages:
-- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
-  parameters:
-    RunLiveTests: true
-    UsePipelineProxy: false
-    ServiceDirectory: 'azidentity'
-    CloudConfig:
-      Public:
-        SubscriptionConfigurations:
-          - $(sub-config-azure-cloud-test-resources)
-          # Contains alternate tenant, AAD app and cert info for testing
-          - $(sub-config-identity-test-resources)
+extends:
+  template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+  parameters:
+    CloudConfig:
+      Public:
+        SubscriptionConfigurations:
+          - $(sub-config-azure-cloud-test-resources)
+          - $(sub-config-identity-test-resources)
+    EnvVars:
+      SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+    RunLiveTests: true
+    ServiceDirectory: azidentity
+    UsePipelineProxy: false
+
+    ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
+      MatrixConfigs:
+        - Name: managed_identity_matrix
+          GenerateVMJobs: true
+          Path: sdk/azidentity/managed-identity-matrix.json
+          Selection: sparse
+          MatrixReplace:
+            - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi
+            - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
index fc3df68eb19..b588750ef33 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
@@ -23,7 +23,7 @@ const credNameAssertion = "ClientAssertionCredential"
 // the most common assertion scenario, authenticating a service principal with a certificate. See
 // [Microsoft Entra ID documentation] for details of the assertion format.
 //
-// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format
+// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/certificate-credentials#assertion-format
 type ClientAssertionCredential struct {
 	client *confidentialClient
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
index 607533f486e..80cd96b560f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
@@ -51,7 +51,8 @@ type ClientCertificateCredential struct {
 	client *confidentialClient
 }
 
-// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults.
+// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. See
+// [ParseCertificates] for help loading a certificate.
 func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) {
 	if len(certs) == 0 {
 		return nil, errors.New("at least one certificate is required")
@@ -86,8 +87,10 @@ func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.
 	return tk, err
 }
 
-// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential.
-// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format.
+// ParseCertificates loads certificates and a private key, in PEM or PKCS#12 format, for use with [NewClientCertificateCredential].
+// Pass nil for password if the private key isn't encrypted. This function has limitations, for example it can't decrypt keys in
+// PEM format or PKCS#12 certificates that use SHA256 for message authentication. If you encounter such limitations, consider
+// using another module to load the certificate and private key.
 func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) {
 	var blocks []*pem.Block
 	var err error
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 854267bdbfd..3bd08c685fb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -91,7 +91,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 		}
 		tro.TenantID = tenant
 	}
-	client, mu, err := c.client(ctx, tro)
+	client, mu, err := c.client(tro)
 	if err != nil {
 		return azcore.AccessToken{}, err
 	}
@@ -109,7 +109,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 	if err != nil {
 		// We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
 		// We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
-		var unavailableErr *credentialUnavailableError
+		var unavailableErr credentialUnavailable
 		if !errors.As(err, &unavailableErr) {
 			res := getResponseFromError(err)
 			err = newAuthenticationFailedError(c.name, err.Error(), res, err)
@@ -121,7 +121,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 	return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
 }
 
-func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
+func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
 	c.clientMu.Lock()
 	defer c.clientMu.Unlock()
 	if tro.EnableCAE {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index 35aeef86747..551d3199462 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -8,10 +8,8 @@ package azidentity
 
 import (
 	"context"
-	"errors"
 	"os"
 	"strings"
-	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
@@ -98,13 +96,13 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
 		creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
 	}
 
-	o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions}
+	o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
 	if ID, ok := os.LookupEnv(azureClientID); ok {
 		o.ID = ClientID(ID)
 	}
 	miCred, err := NewManagedIdentityCredential(o)
 	if err == nil {
-		creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second})
+		creds = append(creds, miCred)
 	} else {
 		errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
 		creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
@@ -158,51 +156,10 @@ type defaultCredentialErrorReporter struct {
 }
 
 func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
-	if _, ok := d.err.(*credentialUnavailableError); ok {
+	if _, ok := d.err.(credentialUnavailable); ok {
 		return azcore.AccessToken{}, d.err
 	}
 	return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error())
 }
 
 var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil)
-
-// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available
-type timeoutWrapper struct {
-	mic *ManagedIdentityCredential
-	// timeout applies to all auth attempts until one doesn't time out
-	timeout time.Duration
-}
-
-// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout
-// because managed identity may not be available and connecting to IMDS can take several minutes to time out.
-func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
-	var tk azcore.AccessToken
-	var err error
-	// no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section
-	if w.timeout > 0 {
-		c, cancel := context.WithTimeout(ctx, w.timeout)
-		defer cancel()
-		tk, err = w.mic.GetToken(c, opts)
-		if isAuthFailedDueToContext(err) {
-			err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information")
-		} else {
-			// some managed identity implementation is available, so don't apply the timeout to future calls
-			w.timeout = 0
-		}
-	} else {
-		tk, err = w.mic.GetToken(ctx, opts)
-	}
-	return tk, err
-}
-
-// unwraps nested AuthenticationFailedErrors to get the root error
-func isAuthFailedDueToContext(err error) bool {
-	for {
-		var authFailedErr *AuthenticationFailedError
-		if !errors.As(err, &authFailedErr) {
-			break
-		}
-		err = authFailedErr.err
-	}
-	return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
index d8b952f532e..be963d3a2af 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
@@ -19,7 +19,7 @@ const cliTimeout = 10 * time.Second
 // the next credential in its chain (another developer credential).
 func unavailableIfInChain(err error, inDefaultChain bool) error {
 	if err != nil && inDefaultChain {
-		var unavailableErr *credentialUnavailableError
+		var unavailableErr credentialUnavailable
 		if !errors.As(err, &unavailableErr) {
 			err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
 		}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
index 1b7a283703a..cd30bedd5e9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -34,8 +34,8 @@ type DeviceCodeCredentialOptions struct {
 	ClientID string
 
 	// disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
-	// When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user
-	// interaction is necessary to acquire a token.
+	// When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+	// to acquire a token.
 	disableAutomaticAuthentication bool
 
 	// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index 42f84875e23..b30f5474f55 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -57,6 +57,9 @@ type EnvironmentCredentialOptions struct {
 //
 // AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file.
 //
+// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
+// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
+//
 // # User with username and password
 //
 // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
@@ -121,7 +124,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme
 		}
 		certs, key, err := ParseCertificates(certData, password)
 		if err != nil {
-			return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err)
+			return nil, fmt.Errorf("failed to parse %q due to error %q. This may be due to a limitation of this module's certificate loader. Consider calling NewClientCertificateCredential instead", certPath, err.Error())
 		}
 		o := &ClientCertificateCredentialOptions{
 			AdditionallyAllowedTenants: additionalTenants,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
index 335d2b7dcf2..698650bbb62 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -13,15 +13,12 @@ import (
 	"fmt"
 	"net/http"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
 	"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
 	msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
 )
 
-// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token
-// because user interaction is required and the credential is configured not to automatically prompt the user.
-var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. Call Authenticate to interactively authenticate a user"}
-
 // getResponseFromError retrieves the response carried by
 // an AuthenticationFailedError or MSAL CallErr, if any
 func getResponseFromError(err error) *http.Response {
@@ -56,7 +53,7 @@ func (e *AuthenticationFailedError) Error() string {
 		return e.credType + ": " + e.message
 	}
 	msg := &bytes.Buffer{}
-	fmt.Fprintf(msg, e.credType+" authentication failed\n")
+	fmt.Fprintf(msg, "%s authentication failed. %s\n", e.credType, e.message)
 	if e.RawResponse.Request != nil {
 		fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
 	} else {
@@ -110,8 +107,34 @@ func (*AuthenticationFailedError) NonRetriable() {
 
 var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
 
-// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required
-// data or state
+// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
+// because the credential requires user interaction and is configured not to request it automatically.
+type authenticationRequiredError struct {
+	credentialUnavailableError
+
+	// TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
+	TokenRequestOptions policy.TokenRequestOptions
+}
+
+func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
+	return &authenticationRequiredError{
+		credentialUnavailableError: credentialUnavailableError{
+			credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
+		},
+		TokenRequestOptions: tro,
+	}
+}
+
+var (
+	_ credentialUnavailable  = (*authenticationRequiredError)(nil)
+	_ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
+)
+
+type credentialUnavailable interface {
+	error
+	credentialUnavailable()
+}
+
 type credentialUnavailableError struct {
 	message string
 }
@@ -135,6 +158,11 @@ func (e *credentialUnavailableError) Error() string {
 }
 
 // NonRetriable is a marker method indicating this error should not be retried. It has no implementation.
-func (e *credentialUnavailableError) NonRetriable() {}
+func (*credentialUnavailableError) NonRetriable() {}
+
+func (*credentialUnavailableError) credentialUnavailable() {}
 
-var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
+var (
+	_ credentialUnavailable  = (*credentialUnavailableError)(nil)
+	_ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
index 65bcba7dfea..c592f283b6b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
@@ -3,12 +3,20 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9an
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -16,14 +24,19 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -35,7 +48,13 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index bd829698375..056785a8a33 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -33,8 +33,8 @@ type InteractiveBrowserCredentialOptions struct { ClientID string // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when - // user interaction is necessary to acquire a token. + // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // to acquire a token. disableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json new file mode 100644 index 00000000000..1c3791777a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -0,0 +1,17 @@ +{ + "include": [ + { + "Agent": { + "msi_image": { + "ArmTemplateParameters": "@{deployResources = $true}", + "OSVmImage": "env:LINUXNEXTVMIMAGE", + "Pool": "env:LINUXPOOL" + } + }, + "GoVersion": [ + "1.22.1" + ], + "IDENTITY_IMDS_AVAILABLE": "1" + } + ] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index d129a1e91c2..6122cc70050 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -14,13 +14,15 @@ import ( "net/http" "net/url" "os" + "path/filepath" + "runtime" "strconv" "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" @@ -44,6 +46,8 @@ const ( serviceFabricAPIVersion = "2019-07-01-preview" ) +var imdsProbeTimeout = time.Second + type msiType int const ( @@ -55,13 +59,28 @@ const ( msiTypeServiceFabric ) -// managedIdentityClient provides the base for authenticating in managed identity environments -// This type includes an runtime.Pipeline and TokenCredentialOptions. 
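Worth a note on the environment_credential.go change above: the new doc text points users at [ParseCertificates] and suggests ClientCertificateCredential when that parser can't handle a certificate. A hedged sketch of that fallback path (the file path and IDs are placeholders):

```go
package main

import (
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	data, err := os.ReadFile("cert.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	certs, key, err := azidentity.ParseCertificates(data, nil)
	if err != nil {
		// The module's loader couldn't parse the file; parse it with
		// another library and call NewClientCertificateCredential directly.
		fmt.Println("ParseCertificates failed:", err)
		return
	}
	cred, err := azidentity.NewClientCertificateCredential("tenant-id", "client-id", certs, key, nil)
	if err != nil {
		panic(err)
	}
	_ = cred // use as an azcore.TokenCredential
}
```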
type managedIdentityClient struct { - azClient *azcore.Client - msiType msiType - endpoint string - id ManagedIDKind + azClient *azcore.Client + endpoint string + id ManagedIDKind + msiType msiType + probeIMDS bool +} + +// arcKeyDirectory returns the directory expected to contain Azure Arc keys +var arcKeyDirectory = func() (string, error) { + switch runtime.GOOS { + case "linux": + return "/var/opt/azcmagent/tokens", nil + case "windows": + pd := os.Getenv("ProgramData") + if pd == "" { + return "", errors.New("environment variable ProgramData has no value") + } + return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil + default: + return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) + } } type wrappedNumber json.Number @@ -88,7 +107,7 @@ func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { if o.StatusCodes == nil { o.StatusCodes = []int{ // IMDS docs recommend retrying 404, 410, 429 and 5xx - // https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + // https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#error-handling http.StatusNotFound, // 404 http.StatusGone, // 410 http.StatusTooManyRequests, // 429 @@ -147,11 +166,12 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeCloudShell } } else { + c.probeIMDS = options.dac setIMDSRetryOptionDefaults(&cp.Retry) } - client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ - Tracing: runtime.TracingOptions{ + client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + Tracing: azruntime.TracingOptions{ Namespace: traceNamespace, }, }, &cp) @@ -180,6 +200,27 @@ func (c *managedIdentityClient) provideToken(ctx context.Context, params confide // authenticate acquires an access token func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, + // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block + if c.probeIMDS { + cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) + defer cancel() + cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) + req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) + if err == nil { + _, err = c.azClient.Pipeline().Do(req) + } + if err != nil { + msg := err.Error() + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + msg = "managed identity timed out. 
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + } + // send normal token requests from now on because something responded + c.probeIMDS = false + } + msg, err := c.createAuthRequest(ctx, id, scopes) if err != nil { return azcore.AccessToken{}, err @@ -190,7 +231,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } - if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { return c.createAccessToken(resp) } @@ -201,15 +242,15 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) } msg := "failed to authenticate a system assigned identity" - if body, err := runtime.Payload(resp); err == nil && len(body) > 0 { + if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { msg += fmt.Sprintf(". The endpoint responded with %s", body) } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) case http.StatusForbidden: // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response, // we return credentialUnavailableError so credential chains continue to their next credential - body, err := runtime.Payload(resp) - if err == nil && strings.Contains(string(body), "A socket operation was attempted to an unreachable network") { + body, err := azruntime.Payload(resp) + if err == nil && strings.Contains(string(body), "unreachable") { return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } @@ -226,7 +267,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} - if err := runtime.UnmarshalAsJSON(res, &value); err != nil { + if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) } if value.ExpiresIn != "" { @@ -276,7 +317,7 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage } func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -296,7 +337,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -316,7 +357,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } func (c 
*managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -339,7 +380,7 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -362,7 +403,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { // create the request to retreive the secret key challenge provided by the HIMDS service - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return "", err } @@ -384,22 +425,36 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", errors.New("did not receive a value from WWW-Authenticate header") + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key - pos := strings.LastIndex(header, "=") - if pos == -1 { - return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + _, p, found := strings.Cut(header, "=") + if !found { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + } + expected, err := arcKeyDirectory() + if err != nil { + return "", err + } + if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + } + f, err := os.Stat(p) + if err != nil { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + } + if s := f.Size(); s > 4096 { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) } - key, err := os.ReadFile(header[pos+1:]) + key, err := os.ReadFile(p) if err != nil { - return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) } return string(key), nil } func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -421,7 +476,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i } func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - 
request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index dcd278befa1..13c043d8e0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -64,12 +64,19 @@ type ManagedIdentityCredentialOptions struct { // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that // some platforms don't accept resource IDs. ID ManagedIDKind + + // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have + // configuration for a specific managed identity API, the credential tries to determine whether IMDS is available before + // sending its first token request. It does this by sending a malformed request with a short timeout. Any response to that + // request is taken to mean IMDS is available, in which case the credential will send ordinary token requests thereafter + // with no special timeout. The purpose of this behavior is to prevent a very long timeout when IMDS isn't available. + dac bool } // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a // user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 5e67cf02145..9dcc82f013b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -10,6 +10,7 @@ import ( "context" "crypto" "crypto/x509" + "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -24,7 +25,7 @@ const credNameOBO = "OnBehalfOfCredential" // is not an interactive authentication flow, an application using it must have admin consent for any delegated // permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. 
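The hunk below also adds a constructor, NewOnBehalfOfCredentialWithClientAssertions, for applications that prove their identity with client assertions (for example, federated credentials) instead of a secret or certificate. A usage sketch against the signature in this diff; the IDs and the assertion source are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Called whenever the credential needs a fresh client assertion.
	getAssertion := func(ctx context.Context) (string, error) {
		b, err := os.ReadFile("assertion.jwt") // placeholder source
		return string(b), err
	}
	cred, err := azidentity.NewOnBehalfOfCredentialWithClientAssertions(
		"tenant-id", "client-id", "user-assertion", getAssertion, nil,
	)
	if err != nil {
		panic(err)
	}
	_ = cred // pass to an azcore-based client as a TokenCredential
}
```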
// -// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -60,6 +61,19 @@ func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion st return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) } +// NewOnBehalfOfCredentialWithClientAssertions constructs an OnBehalfOfCredential that authenticates with client assertions. +// userAssertion is the user's access token for the application. The getAssertion function should return client assertions +// that authenticate the application to Microsoft Entra ID, such as federated credentials. +func NewOnBehalfOfCredentialWithClientAssertions(tenantID, clientID, userAssertion string, getAssertion func(context.Context) (string, error), options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion can't be nil. It must be a function that returns client assertions") + } + cred := confidential.NewCredFromAssertionCallback(func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }) + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + // NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { cred, err := confidential.NewCredFromSecret(clientSecret) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 63c31190d18..b3d22dbf3ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, errAuthenticationRequired + return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 new file mode 100644 index 00000000000..a69bbce34c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. 
+ +param ( + [hashtable] $AdditionalParameters = @{}, + [hashtable] $DeploymentOutputs +) + +$ErrorActionPreference = 'Stop' +$PSNativeCommandUseErrorActionPreference = $true + +if ($CI) { + if (!$AdditionalParameters['deployResources']) { + Write-Host "Skipping post-provisioning script because resources weren't deployed" + return + } + az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] +} + +Write-Host "Building container" +$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" +Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +ENV GOARCH=amd64 GOWORK=off +COPY . /azidentity +WORKDIR /azidentity/testdata/managed-id-test +RUN go mod tidy +RUN go build -o /build/managed-id-test . +RUN GOOS=windows go build -o /build/managed-id-test.exe . + +FROM mcr.microsoft.com/mirror/docker/library/alpine:3.16 +RUN apk add gcompat +COPY --from=builder /build/* . +RUN chmod +x managed-id-test +CMD ["./managed-id-test"] +"@ +# build from sdk/azidentity because we need that dir in the context (because the test app uses local azidentity) +docker build -t $image "$PSScriptRoot" +az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME'] +docker push $image + +$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP'] + +# ACI is easier to provision here than in the bicep file because the image isn't available before now +Write-Host "Deploying Azure Container Instance" +$aciName = "azidentity-test" +az container create -g $rg -n $aciName --image $image ` + --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --role "Storage Blob Data Reader" ` + --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` + -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" + +# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip +Write-Host "Deploying to Azure Functions" +$container = docker create $image +docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/" +docker rm -v $container +Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force +az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip + +Write-Host "Creating federated identity" +$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME'] +$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME'] +$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv +$podName = "azidentity-test" +$serviceAccountName = "workload-identity-sa" +az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName +Write-Host "Deploying to AKS" +az aks get-credentials -g $rg -n $aksName +az aks update --attach-acr 
$DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName +Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @" +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + azure.workload.identity/client-id: $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) + name: $serviceAccountName + namespace: default +--- +apiVersion: v1 +kind: Pod +metadata: + name: $podName + namespace: default + labels: + app: $podName + azure.workload.identity/use: "true" +spec: + serviceAccountName: $serviceAccountName + containers: + - name: $podName + image: $image + env: + - name: AZIDENTITY_STORAGE_NAME + value: $($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) + - name: AZIDENTITY_USE_WORKLOAD_IDENTITY + value: "true" + - name: FUNCTIONS_CUSTOMHANDLER_PORT + value: "80" + nodeSelector: + kubernetes.io/os: linux +"@ +kubectl apply -f "$PSScriptRoot/k8s.yaml" +Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 index fe0183addeb..58766d0a022 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -1,36 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. + [CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] param ( + [hashtable] $AdditionalParameters = @{}, + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments ) +if (-not (Test-Path "$PSScriptRoot/sshkey.pub")) { + ssh-keygen -t rsa -b 4096 -f "$PSScriptRoot/sshkey" -N '' -C '' +} +$templateFileParameters['sshPubKey'] = Get-Content "$PSScriptRoot/sshkey.pub" + if (!$CI) { # TODO: Remove this once auto-cloud config downloads are supported locally Write-Host "Skipping cert setup in local testing mode" return } -if ($EnvironmentVariables -eq $null -or $EnvironmentVariables.Count -eq 0) { +if ($null -eq $EnvironmentVariables -or $EnvironmentVariables.Count -eq 0) { throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" } $tmp = $env:TEMP ? 
$env:TEMP : [System.IO.Path]::GetTempPath() $pfxPath = Join-Path $tmp "test.pfx" $pemPath = Join-Path $tmp "test.pem" -$sniPath = Join-Path $tmp "testsni.pfx" -Write-Host "Creating identity test files: $pfxPath $pemPath $sniPath" +Write-Host "Creating identity test files: $pfxPath $pemPath" [System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] -[System.Convert]::FromBase64String($EnvironmentVariables['SNI_CONTENTS']) | Set-Content -Path $sniPath -AsByteStream # Set for pipeline Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" -Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_SNI;]$sniPath" # Set for local $env:IDENTITY_SP_CERT_PFX = $pfxPath $env:IDENTITY_SP_CERT_PEM = $pemPath -$env:IDENTITY_SP_CERT_SNI = $sniPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index b3490d3b50a..2a216529309 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -1 +1,219 @@ -param baseName string +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +@description('Kubernetes cluster admin user name.') +param adminUser string = 'azureuser' + +@minLength(6) +@maxLength(23) +@description('The base resource name.') +param baseName string = resourceGroup().name + +@description('Whether to deploy resources. When set to false, this file deploys nothing.') +param deployResources bool = false + +param sshPubKey string = '' + +@description('The location of the resource. By default, this is the same as the resource group.') +param location string = resourceGroup().location + +// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles +var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') +var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') + +resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource saUserAssigned 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa2${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource usermgdid 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = if (deployResources) { + location: location + name: baseName +} + +resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, acrPull, 'containerInstance') + properties: { + principalId: deployResources ? 
usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: acrPull + } + scope: containerRegistry +} + +resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + scope: saUserAssigned + name: guid(resourceGroup().id, blobReader, usermgdid.id) + properties: { + principalId: deployResources ? usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: blobReader + } +} + +resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, blobReader, 'azfunc') + properties: { + principalId: deployResources ? azfunc.identity.principalId : '' + roleDefinitionId: blobReader + principalType: 'ServicePrincipal' + } + scope: sa +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) { + location: location + name: uniqueString(resourceGroup().id) + properties: { + adminUserEnabled: true + } + sku: { + name: 'Basic' + } +} + +resource farm 'Microsoft.Web/serverfarms@2021-03-01' = if (deployResources) { + kind: 'app' + location: location + name: '${baseName}_asp' + properties: {} + sku: { + capacity: 1 + family: 'B' + name: 'B1' + size: 'B1' + tier: 'Basic' + } +} + +resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { + identity: { + type: 'SystemAssigned, UserAssigned' + userAssignedIdentities: { + '${deployResources ? usermgdid.id : ''}': {} + } + } + kind: 'functionapp' + location: location + name: '${baseName}func' + properties: { + enabled: true + httpsOnly: true + keyVaultReferenceIdentity: 'SystemAssigned' + serverFarmId: farm.id + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'AZIDENTITY_STORAGE_NAME' + value: deployResources ? sa.name : null + } + { + name: 'AZIDENTITY_STORAGE_NAME_USER_ASSIGNED' + value: deployResources ? saUserAssigned.name : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' + value: deployResources ? usermgdid.id : null + } + { + name: 'AzureWebJobsStorage' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' + } + { + name: 'FUNCTIONS_EXTENSION_VERSION' + value: '~4' + } + { + name: 'FUNCTIONS_WORKER_RUNTIME' + value: 'custom' + } + { + name: 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? 
sa.listKeys().keys[0].value : ''}' + } + { + name: 'WEBSITE_CONTENTSHARE' + value: toLower('${baseName}-func') + } + ] + http20Enabled: true + minTlsVersion: '1.2' + } + } +} + +resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deployResources) { + name: baseName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + agentPoolProfiles: [ + { + count: 1 + enableAutoScaling: false + kubeletDiskType: 'OS' + mode: 'System' + name: 'agentpool' + osDiskSizeGB: 128 + osDiskType: 'Managed' + osSKU: 'Ubuntu' + osType: 'Linux' + type: 'VirtualMachineScaleSets' + vmSize: 'Standard_D2s_v3' + } + ] + dnsPrefix: 'identitytest' + enableRBAC: true + linuxProfile: { + adminUsername: adminUser + ssh: { + publicKeys: [ + { + keyData: sshPubKey + } + ] + } + } + oidcIssuerProfile: { + enabled: true + } + securityProfile: { + workloadIdentity: { + enabled: true + } + } + } +} + +output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : '' +output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : '' +output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : '' +output AZIDENTITY_FUNCTION_NAME string = deployResources ? azfunc.name : '' +output AZIDENTITY_STORAGE_ID string = deployResources ? sa.id : '' +output AZIDENTITY_STORAGE_NAME string = deployResources ? sa.name : '' +output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAssigned.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 9b9d7ae0d20..459ef64c6f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.5.2" + version = "v1.6.0" ) diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d9733..00000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dacd4..eab5dbf7ba7 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6d3..969b910d706 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. 
// // btree implements an in-memory B-Tree for use as an ordered data structure. diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 00000000000..e44a0f48804 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. +// Those without this prefix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. 
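Since the package doc above introduces the new generic API, a short usage sketch may help. It assumes the rest of the upstream BTreeG surface (ReplaceOrInsert, Get, Ascend, Min), which is defined later in this file:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// NewOrderedG uses the built-in '<' for types satisfying Ordered.
	tr := btree.NewOrderedG[int](2) // degree 2: a 2-3-4 tree
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(i)
	}
	if v, ok := tr.Get(7); ok {
		fmt.Println("found:", v)
	}
	// Ascend visits items in order until the iterator returns false.
	tr.Ascend(func(v int) bool {
		fmt.Print(v, " ")
		return v < 4 // stop after printing 4
	})
	fmt.Println()

	// NewG takes a LessFunc, so any type can be ordered.
	type user struct{ name string }
	byName := btree.NewG(2, func(a, b user) bool { return a.name < b.name })
	byName.ReplaceOrInsert(user{"bob"})
	byName.ReplaceOrInsert(user{"alice"})
	if u, ok := byName.Min(); ok {
		fmt.Println("min:", u.name) // alice
	}
}
```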
+package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator work. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: ©OnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. 
+func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. 
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. 
+//
+// Most documentation says we have to do two sets of special casing:
+//  1) item is in this node
+//  2) item is in child
+// In both cases, we need to handle the two subcases:
+//  A) node has enough values that it can spare one
+//  B) node doesn't have enough values
+// For the latter, we have to check:
+//  a) left sibling has node to spare
+//  b) right sibling has node to spare
+//  c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+type optionalItem[T any] struct {
+	item  T
+	valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+	return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+	return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true. Otherwise, (zeroValue, false) is
+// returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out, outb := t.root.insert(item, t.maxItems())
+	if !outb {
+		t.length++
+	}
+	return out, outb
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+	var zero T
+	return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
+func NewFreeList(size int) *FreeList {
+	return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).Delete(item)
+	return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMax()
+	return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	i, _ := (*BTreeG[Item])(t).DeleteMin()
+	return i
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	i, _ := (*BTreeG[Item])(t).Get(key)
+	return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	i, _ := (*BTreeG[Item])(t).Max()
+	return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	i, _ := (*BTreeG[Item])(t).Min()
+	return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+	return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	(*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 21a17c5af39..983d44c7db4 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -140,9 +140,10 @@ log.Printf("[DEBUG] %d", 42) ... 
[DEBUG] my-app: 42
 ```
 
-Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
+Notice that if `appLogger` is initialized with the `INFO` log level, _and_ you
 specify `InferLevels: true`, you will not see any output here. You must change
 `appLogger` to `DEBUG` to see output. See the docs for more information.
 
 If the log lines start with a timestamp you can use the
-`InferLevelsWithTimestamp` option to try and ignore them.
+`InferLevelsWithTimestamp` option to try to ignore them. Please note that in order
+for `InferLevelsWithTimestamp` to be relevant, `InferLevels` must be set to `true`.
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go
index b45064acf1a..272a710c04c 100644
--- a/vendor/github.com/hashicorp/go-hclog/intlogger.go
+++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go
@@ -55,23 +55,38 @@ var (
 	faintBoldColor                 = color.New(color.Faint, color.Bold)
 	faintColor                     = color.New(color.Faint)
-	faintMultiLinePrefix           = faintColor.Sprint("   | ")
-	faintFieldSeparator            = faintColor.Sprint("=")
-	faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
+	faintMultiLinePrefix           string
+	faintFieldSeparator            string
+	faintFieldSeparatorWithNewLine string
 )
 
+func init() {
+	// Force all the colors to be enabled because we do our own detection of color usage.
+	for _, c := range _levelToColor {
+		c.EnableColor()
+	}
+
+	faintBoldColor.EnableColor()
+	faintColor.EnableColor()
+
+	faintMultiLinePrefix = faintColor.Sprint("   | ")
+	faintFieldSeparator = faintColor.Sprint("=")
+	faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
+}
+
 // Make sure that intLogger is a Logger
 var _ Logger = &intLogger{}
 
 // intLogger is an internal logger implementation. Internal in that it is
 // defined entirely by this package.
 type intLogger struct {
-	json         bool
-	callerOffset int
-	name         string
-	timeFormat   string
-	timeFn       TimeFunction
-	disableTime  bool
+	json              bool
+	jsonEscapeEnabled bool
+	callerOffset      int
+	name              string
+	timeFormat        string
+	timeFn            TimeFunction
+	disableTime       bool
 
 	// This is an interface so that it's shared by any derived loggers, since
 	// those derived loggers share the bufio.Writer as well.
@@ -79,6 +94,19 @@ type intLogger struct {
 	writer *writer
 	level  *int32
 
+	// The value of curEpoch when our level was set
+	setEpoch uint64
+
+	// The value of curEpoch the last time we performed the level sync process
+	ownEpoch uint64
+
+	// Shared amongst all the loggers created in this hierarchy, used to determine
+	// if the level sync process should be run by comparing it with ownEpoch
+	curEpoch *uint64
+
+	// The logger this one was created from.
Only set when syncParentLevel is set + parent *intLogger + headerColor ColorOption fieldColor ColorOption @@ -88,6 +116,7 @@ type intLogger struct { // create subloggers with their own level setting independentLevels bool + syncParentLevel bool subloggerHook func(sub Logger) Logger } @@ -129,9 +158,9 @@ func newLogger(opts *LoggerOptions) *intLogger { } var ( - primaryColor ColorOption = ColorOff - headerColor ColorOption = ColorOff - fieldColor ColorOption = ColorOff + primaryColor = ColorOff + headerColor = ColorOff + fieldColor = ColorOff ) switch { case opts.ColorHeaderOnly: @@ -145,6 +174,7 @@ func newLogger(opts *LoggerOptions) *intLogger { l := &intLogger{ json: opts.JSONFormat, + jsonEscapeEnabled: !opts.JSONEscapeDisabled, name: opts.Name, timeFormat: TimeFormat, timeFn: time.Now, @@ -152,8 +182,10 @@ func newLogger(opts *LoggerOptions) *intLogger { mutex: mutex, writer: newWriter(output, primaryColor), level: new(int32), + curEpoch: new(uint64), exclude: opts.Exclude, independentLevels: opts.IndependentLevels, + syncParentLevel: opts.SyncParentLevel, headerColor: headerColor, fieldColor: fieldColor, subloggerHook: opts.SubloggerHook, @@ -194,7 +226,7 @@ const offsetIntLogger = 3 // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(l.level)) { + if level < l.GetLevel() { return } @@ -597,7 +629,7 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) - if args != nil && len(args) > 0 { + if len(args) > 0 { if len(args)%2 != 0 { cs, ok := args[len(args)-1].(CapturedStacktrace) if ok { @@ -637,13 +669,17 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a } } - err := json.NewEncoder(l.writer).Encode(vals) + encoder := json.NewEncoder(l.writer) + encoder.SetEscapeHTML(l.jsonEscapeEnabled) + err := encoder.Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg - json.NewEncoder(l.writer).Encode(plainVal) + errEncoder := json.NewEncoder(l.writer) + errEncoder.SetEscapeHTML(l.jsonEscapeEnabled) + errEncoder.Encode(plainVal) } } } @@ -718,27 +754,27 @@ func (l *intLogger) Error(msg string, args ...interface{}) { // Indicate that the logger would emit TRACE level logs func (l *intLogger) IsTrace() bool { - return Level(atomic.LoadInt32(l.level)) == Trace + return l.GetLevel() == Trace } // Indicate that the logger would emit DEBUG level logs func (l *intLogger) IsDebug() bool { - return Level(atomic.LoadInt32(l.level)) <= Debug + return l.GetLevel() <= Debug } // Indicate that the logger would emit INFO level logs func (l *intLogger) IsInfo() bool { - return Level(atomic.LoadInt32(l.level)) <= Info + return l.GetLevel() <= Info } // Indicate that the logger would emit WARN level logs func (l *intLogger) IsWarn() bool { - return Level(atomic.LoadInt32(l.level)) <= Warn + return l.GetLevel() <= Warn } // Indicate that the logger would emit ERROR level logs func (l *intLogger) IsError() bool { - return Level(atomic.LoadInt32(l.level)) <= Error + return l.GetLevel() <= Error } const MissingKey = "EXTRA_VALUE_AT_END" @@ -854,12 +890,63 @@ func (l *intLogger) resetOutput(opts *LoggerOptions) error { // Update the logging level on-the-fly. 
This will affect all subloggers as
 // well.
 func (l *intLogger) SetLevel(level Level) {
-	atomic.StoreInt32(l.level, int32(level))
+	if !l.syncParentLevel {
+		atomic.StoreInt32(l.level, int32(level))
+		return
+	}
+
+	nsl := new(int32)
+	*nsl = int32(level)
+
+	l.level = nsl
+
+	l.ownEpoch = atomic.AddUint64(l.curEpoch, 1)
+	l.setEpoch = l.ownEpoch
+}
+
+func (l *intLogger) searchLevelPtr() *int32 {
+	p := l.parent
+
+	ptr := l.level
+
+	max := l.setEpoch
+
+	for p != nil {
+		if p.setEpoch > max {
+			max = p.setEpoch
+			ptr = p.level
+		}
+
+		p = p.parent
+	}
+
+	return ptr
 }
 
 // Returns the current level
 func (l *intLogger) GetLevel() Level {
-	return Level(atomic.LoadInt32(l.level))
+	// We perform the loads immediately to keep the CPU pipeline busy, which
+	// effectively makes the second load cost nothing. Once loaded into registers
+	// the comparison returns the already loaded value. The comparison is almost
+	// always true, so the branch predictor should hit consistently with it.
+	var (
+		curEpoch = atomic.LoadUint64(l.curEpoch)
+		level    = Level(atomic.LoadInt32(l.level))
+		own      = l.ownEpoch
+	)
+
+	if curEpoch == own {
+		return level
+	}
+
+	// Perform the level sync process. We'll avoid doing this next time by
+	// treating the epoch as current.
+
+	ptr := l.searchLevelPtr()
+	l.level = ptr
+	l.ownEpoch = curEpoch
+
+	return Level(atomic.LoadInt32(ptr))
 }
 
 // Create a *log.Logger that will send it's data through this Logger. This
@@ -912,6 +999,8 @@ func (l *intLogger) copy() *intLogger {
 	if l.independentLevels {
 		sl.level = new(int32)
 		*sl.level = *l.level
+	} else if l.syncParentLevel {
+		sl.parent = l
 	}
 
 	return &sl
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
index 947ac0c9afc..ad17544f550 100644
--- a/vendor/github.com/hashicorp/go-hclog/logger.go
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -233,6 +233,7 @@ type StandardLoggerOptions struct {
 	// [DEBUG] and strip it off before reapplying it.
 	// The timestamp detection may result in false positives and incomplete
 	// string outputs.
+	// InferLevelsWithTimestamp is only relevant if InferLevels is true.
 	InferLevelsWithTimestamp bool
 
 	// ForceLevel is used to force all output from the standard logger to be at
@@ -263,6 +264,9 @@ type LoggerOptions struct {
 	// Control if the output should be in JSON.
 	JSONFormat bool
 
+	// Control whether HTML characters are escaped by the json.Encoder.
+	JSONEscapeDisabled bool
+
 	// Include file and line information in each log line
 	IncludeLocation bool
 
@@ -303,6 +307,24 @@ type LoggerOptions struct {
 	// will not affect the parent or sibling loggers.
 	IndependentLevels bool
 
+	// When set, changing the level of a logger affects only its direct sub-loggers
+	// rather than all sub-loggers. For example:
+	//   a := logger.Named("a")
+	//   a.SetLevel(Error)
+	//   b := a.Named("b")
+	//   c := a.Named("c")
+	//   b.GetLevel() => Error
+	//   c.GetLevel() => Error
+	//   b.SetLevel(Info)
+	//   a.GetLevel() => Error
+	//   b.GetLevel() => Info
+	//   c.GetLevel() => Error
+	//   a.SetLevel(Warn)
+	//   a.GetLevel() => Warn
+	//   b.GetLevel() => Warn
+	//   c.GetLevel() => Warn
+	SyncParentLevel bool
+
 	// SubloggerHook registers a function that is called when a sublogger via
 	// Named, With, or ResetNamed is created.
If defined, the function is passed // the newly created Logger and the returned Logger is returned from the diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 15b760337ae..805f5e7b4ed 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -67,195 +67,200 @@ const ( // Keep index -1 as unknown UNKNOWN = -1 - // Add features - ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - AESNI // Advanced Encryption Standard New Instructions - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - AMXBF16 // Tile computational operations on BFLOAT16 numbers - AMXFP16 // Tile computational operations on FP16 numbers - AMXINT8 // Tile computational operations on 8-bit integers - AMXTILE // Tile architecture - APX_F // Intel APX - AVX // AVX functions - AVX10 // If set the Intel AVX10 Converged Vector ISA is supported - AVX10_128 // If set indicates that AVX10 128-bit vector support is present - AVX10_256 // If set indicates that AVX10 256-bit vector support is present - AVX10_512 // If set indicates that AVX10 512-bit vector support is present - AVX2 // AVX2 functions - AVX512BF16 // AVX-512 BFLOAT16 Instructions - AVX512BITALG // AVX-512 Bit Algorithms - AVX512BW // AVX-512 Byte and Word Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512F // AVX-512 Foundation - AVX512FP16 // AVX-512 FP16 Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VL // AVX-512 Vector Length Extensions - AVX512VNNI // AVX-512 Vector Neural Network Instructions - AVX512VP2INTERSECT // AVX-512 Intersect for D/Q - AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - AVXIFMA // AVX-IFMA instructions - AVXNECONVERT // AVX-NE-CONVERT instructions - AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one - AVXVNNI // AVX (VEX encoded) VNNI neural network instructions - AVXVNNIINT8 // AVX-VNNI-INT8 instructions - BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - CETIBT // Intel CET Indirect Branch Tracking - CETSS // Intel CET Shadow Stack - CLDEMOTE // Cache Line Demote - CLMUL // Carry-less Multiplication - CLZERO // CLZERO instruction supported - CMOV // i686 CMOV - CMPCCXADD // CMPCCXADD instructions - CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB - CMPXCHG8 // CMPXCHG8 instruction - CPBOOST // Core Performance Boost - CPPC // AMD: Collaborative Processor Performance Control - CX16 // CMPXCHG16B Instruction - EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ - ENQCMD // Enqueue Command - ERMS // Enhanced REP MOVSB/STOSB - F16C // Half-precision floating-point conversion - FLUSH_L1D // Flush L1D cache - FMA3 // Intel FMA 3. Does not imply AVX. 
- FMA4 // Bulldozer FMA4 functions - FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide - FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide - FSRM // Fast Short Rep Mov - FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 - FXSROPT // FXSAVE/FXRSTOR optimizations - GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. - HLE // Hardware Lock Elision - HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR - HTT // Hyperthreading (enabled) - HWA // Hardware assert supported. Indicates support for MSRC001_10 - HYBRID_CPU // This part has CPUs of more than one type. - HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors - IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) - IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - IBRS // AMD: Indirect Branch Restricted Speculation - IBRS_PREFERRED // AMD: IBRS is preferred over software solution - IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection - IBS // Instruction Based Sampling (AMD) - IBSBRNTRGT // Instruction Based Sampling Feature (AMD) - IBSFETCHSAM // Instruction Based Sampling Feature (AMD) - IBSFFV // Instruction Based Sampling Feature (AMD) - IBSOPCNT // Instruction Based Sampling Feature (AMD) - IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) - IBSOPSAM // Instruction Based Sampling Feature (AMD) - IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) - IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) - IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported - IBS_OPDATA4 // AMD: IBS op data 4 MSR supported - IBS_OPFUSE // AMD: Indicates support for IbsOpFuse - IBS_PREVENTHOST // Disallowing IBS use by the host supported - IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 - IDPRED_CTRL // IPRED_DIS - INT_WBINVD // WBINVD/WBNOINVD are interruptible. - INVLPGB // NVLPGB and TLBSYNC instruction supported - KEYLOCKER // Key locker - KEYLOCKERW // Key locker wide - LAHF // LAHF/SAHF in long mode - LAM // If set, CPU supports Linear Address Masking - LBRVIRT // LBR virtualization - LZCNT // LZCNT instruction - MCAOVERFLOW // MCA overflow recovery support. - MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. - MCOMMIT // MCOMMIT instruction supported - MD_CLEAR // VERW clears CPU buffers - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - MOVBE // MOVBE instruction (big-endian) - MOVDIR64B // Move 64 Bytes as Direct Store - MOVDIRI // Move Doubleword as Direct Store - MOVSB_ZL // Fast Zero-Length MOVSB - MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD - MPX // Intel MPX (Memory Protection Extensions) - MSRIRC // Instruction Retired Counter MSR available - MSRLIST // Read/Write List of Model Specific Registers - MSR_PAGEFLUSH // Page Flush MSR available - NRIPS // Indicates support for NRIP save on VMEXIT - NX // NX (No-Execute) bit - OSXSAVE // XSAVE enabled by OS - PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption - POPCNT // POPCNT instruction - PPIN // AMD: Protected Processor Inventory Number support. 
Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled - PREFETCHI // PREFETCHIT0/1 instructions - PSFD // Predictive Store Forward Disable - RDPRU // RDPRU instruction supported - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - RDTSCP // RDTSCP Instruction - RRSBA_CTRL // Restricted RSB Alternate - RTM // Restricted Transactional Memory - RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. - SERIALIZE // Serialize Instruction Execution - SEV // AMD Secure Encrypted Virtualization supported - SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host - SEV_ALTERNATIVE // AMD SEV Alternate Injection supported - SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests - SEV_ES // AMD SEV Encrypted State supported - SEV_RESTRICTED // AMD SEV Restricted Injection supported - SEV_SNP // AMD SEV Secure Nested Paging supported - SGX // Software Guard Extensions - SGXLC // Software Guard Extensions Launch Control - SHA // Intel SHA Extensions - SME // AMD Secure Memory Encryption supported - SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced - SPEC_CTRL_SSBD // Speculative Store Bypass Disable - SRBDS_CTRL // SRBDS mitigation MSR available - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE42 // Nehalem SSE4.2 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSSE3 // Conroe SSSE3 functions - STIBP // Single Thread Indirect Branch Predictors - STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On - STOSB_SHORT // Fast short STOSB - SUCCOR // Software uncorrectable error containment and recovery capability. - SVM // AMD Secure Virtual Machine - SVMDA // Indicates support for the SVM decode assists. - SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control - SVML // AMD SVM lock. Indicates support for SVM-Lock. - SVMNP // AMD SVM nested paging - SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter - SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold - SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. - SYSEE // SYSENTER and SYSEXIT instructions - TBM // AMD Trailing Bit Manipulation - TDX_GUEST // Intel Trust Domain Extensions Guest - TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations - TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. - TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. - TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 - TSXLDTRK // Intel TSX Suspend Load Address Tracking - VAES // Vector AES. AVX(512) versions requires additional checks. - VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. - VMPL // AMD VM Permission Levels supported - VMSA_REGPROT // AMD VMSA Register Protection supported - VMX // Virtual Machine Extensions - VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. 
-	VTE                // AMD Virtual Transparent Encryption supported
-	WAITPKG            // TPAUSE, UMONITOR, UMWAIT
-	WBNOINVD           // Write Back and Do Not Invalidate Cache
-	WRMSRNS            // Non-Serializing Write to Model Specific Register
-	X87                // FPU
-	XGETBV1            // Supports XGETBV with ECX = 1
-	XOP                // Bulldozer XOP functions
-	XSAVE              // XSAVE, XRESTOR, XSETBV, XGETBV
-	XSAVEC             // Supports XSAVEC and the compacted form of XRSTOR.
-	XSAVEOPT           // XSAVEOPT available
-	XSAVES             // Supports XSAVES/XRSTORS and IA32_XSS
+	// x86 features
+	ADX                 FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	AESNI                                // Advanced Encryption Standard New Instructions
+	AMD3DNOW                             // AMD 3DNOW
+	AMD3DNOWEXT                          // AMD 3DNowExt
+	AMXBF16                              // Tile computational operations on BFLOAT16 numbers
+	AMXFP16                              // Tile computational operations on FP16 numbers
+	AMXINT8                              // Tile computational operations on 8-bit integers
+	AMXTILE                              // Tile architecture
+	APX_F                                // Intel APX
+	AVX                                  // AVX functions
+	AVX10                                // If set the Intel AVX10 Converged Vector ISA is supported
+	AVX10_128                            // If set indicates that AVX10 128-bit vector support is present
+	AVX10_256                            // If set indicates that AVX10 256-bit vector support is present
+	AVX10_512                            // If set indicates that AVX10 512-bit vector support is present
+	AVX2                                 // AVX2 functions
+	AVX512BF16                           // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG                         // AVX-512 Bit Algorithms
+	AVX512BW                             // AVX-512 Byte and Word Instructions
+	AVX512CD                             // AVX-512 Conflict Detection Instructions
+	AVX512DQ                             // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER                             // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F                              // AVX-512 Foundation
+	AVX512FP16                           // AVX-512 FP16 Instructions
+	AVX512IFMA                           // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF                             // AVX-512 Prefetch Instructions
+	AVX512VBMI                           // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2                          // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL                             // AVX-512 Vector Length Extensions
+	AVX512VNNI                           // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT                   // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ                      // AVX-512 Vector Population Count Doubleword and Quadword
+	AVXIFMA                              // AVX-IFMA instructions
+	AVXNECONVERT                         // AVX-NE-CONVERT instructions
+	AVXSLOW                              // Indicates the CPU performs two 128-bit operations instead of one
+	AVXVNNI                              // AVX (VEX encoded) VNNI neural network instructions
+	AVXVNNIINT8                          // AVX-VNNI-INT8 instructions
+	BHI_CTRL                             // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598
+	BMI1                                 // Bit Manipulation Instruction Set 1
+	BMI2                                 // Bit Manipulation Instruction Set 2
+	CETIBT                               // Intel CET Indirect Branch Tracking
+	CETSS                                // Intel CET Shadow Stack
+	CLDEMOTE                             // Cache Line Demote
+	CLMUL                                // Carry-less Multiplication
+	CLZERO                               // CLZERO instruction supported
+	CMOV                                 // i686 CMOV
+	CMPCCXADD                            // CMPCCXADD instructions
+	CMPSB_SCADBS_SHORT                   // Fast short CMPSB and SCASB
+	CMPXCHG8                             // CMPXCHG8 instruction
+	CPBOOST                              // Core Performance Boost
+	CPPC                                 // AMD: Collaborative Processor Performance Control
+	CX16                                 // CMPXCHG16B Instruction
+	EFER_LMSLE_UNS                       // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ
+	ENQCMD                               // Enqueue Command
+	ERMS                                 // Enhanced REP MOVSB/STOSB
+	F16C                                 // Half-precision floating-point conversion
+	FLUSH_L1D                            // Flush L1D cache
+	FMA3                                 // Intel FMA 3. Does not imply AVX.
+	FMA4                                 // Bulldozer FMA4 functions
+	FP128                                // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide
+	FP256                                // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide
+	FSRM                                 // Fast Short Rep Mov
+	FXSR                                 // FXSAVE, FXRESTOR instructions, CR4 bit 9
+	FXSROPT                              // FXSAVE/FXRSTOR optimizations
+	GFNI                                 // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage.
+	HLE                                  // Hardware Lock Elision
+	HRESET                               // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR
+	HTT                                  // Hyperthreading (enabled)
+	HWA                                  // Hardware assert supported. Indicates support for MSRC001_10
+	HYBRID_CPU                           // This part has CPUs of more than one type.
+	HYPERVISOR                           // This bit has been reserved by Intel & AMD for use by hypervisors
+	IA32_ARCH_CAP                        // IA32_ARCH_CAPABILITIES MSR (Intel)
+	IA32_CORE_CAP                        // IA32_CORE_CAPABILITIES MSR
+	IBPB                                 // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+	IBPB_BRTYPE                          // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor
+	IBRS                                 // AMD: Indirect Branch Restricted Speculation
+	IBRS_PREFERRED                       // AMD: IBRS is preferred over software solution
+	IBRS_PROVIDES_SMP                    // AMD: IBRS provides Same Mode Protection
+	IBS                                  // Instruction Based Sampling (AMD)
+	IBSBRNTRGT                           // Instruction Based Sampling Feature (AMD)
+	IBSFETCHSAM                          // Instruction Based Sampling Feature (AMD)
+	IBSFFV                               // Instruction Based Sampling Feature (AMD)
+	IBSOPCNT                             // Instruction Based Sampling Feature (AMD)
+	IBSOPCNTEXT                          // Instruction Based Sampling Feature (AMD)
+	IBSOPSAM                             // Instruction Based Sampling Feature (AMD)
+	IBSRDWROPCNT                         // Instruction Based Sampling Feature (AMD)
+	IBSRIPINVALIDCHK                     // Instruction Based Sampling Feature (AMD)
+	IBS_FETCH_CTLX                       // AMD: IBS fetch control extended MSR supported
+	IBS_OPDATA4                          // AMD: IBS op data 4 MSR supported
+	IBS_OPFUSE                           // AMD: Indicates support for IbsOpFuse
+	IBS_PREVENTHOST                      // Disallowing IBS use by the host supported
+	IBS_ZEN4                             // AMD: Fetch and Op IBS support IBS extensions added with Zen4
+	IDPRED_CTRL                          // IPRED_DIS
+	INT_WBINVD                           // WBINVD/WBNOINVD are interruptible.
+	INVLPGB                              // INVLPGB and TLBSYNC instructions supported
+	KEYLOCKER                            // Key locker
+	KEYLOCKERW                           // Key locker wide
+	LAHF                                 // LAHF/SAHF in long mode
+	LAM                                  // If set, CPU supports Linear Address Masking
+	LBRVIRT                              // LBR virtualization
+	LZCNT                                // LZCNT instruction
+	MCAOVERFLOW                          // MCA overflow recovery support.
+	MCDT_NO                              // Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it.
+	MCOMMIT                              // MCOMMIT instruction supported
+	MD_CLEAR                             // VERW clears CPU buffers
+	MMX                                  // standard MMX
+	MMXEXT                               // SSE integer functions or AMD MMX ext
+	MOVBE                                // MOVBE instruction (big-endian)
+	MOVDIR64B                            // Move 64 Bytes as Direct Store
+	MOVDIRI                              // Move Doubleword as Direct Store
+	MOVSB_ZL                             // Fast Zero-Length MOVSB
+	MOVU                                 // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS.
MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. 
+	SYSEE                                // SYSENTER and SYSEXIT instructions
+	TBM                                  // AMD Trailing Bit Manipulation
+	TDX_GUEST                            // Intel Trust Domain Extensions Guest
+	TLB_FLUSH_NESTED                     // AMD: Flushing includes all the nested translations for guest translations
+	TME                                  // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
+	TOPEXT                               // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
+	TSCRATEMSR                           // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104
+	TSXLDTRK                             // Intel TSX Suspend Load Address Tracking
+	VAES                                 // Vector AES. AVX(512) versions require additional checks.
+	VMCBCLEAN                            // VMCB clean bits. Indicates support for VMCB clean bits.
+	VMPL                                 // AMD VM Permission Levels supported
+	VMSA_REGPROT                         // AMD VMSA Register Protection supported
+	VMX                                  // Virtual Machine Extensions
+	VPCLMULQDQ                           // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions.
+	VTE                                  // AMD Virtual Transparent Encryption supported
+	WAITPKG                              // TPAUSE, UMONITOR, UMWAIT
+	WBNOINVD                             // Write Back and Do Not Invalidate Cache
+	WRMSRNS                              // Non-Serializing Write to Model Specific Register
+	X87                                  // FPU
+	XGETBV1                              // Supports XGETBV with ECX = 1
+	XOP                                  // Bulldozer XOP functions
+	XSAVE                                // XSAVE, XRESTOR, XSETBV, XGETBV
+	XSAVEC                               // Supports XSAVEC and the compacted form of XRSTOR.
+	XSAVEOPT                             // XSAVEOPT available
+	XSAVES                               // Supports XSAVES/XRSTORS and IA32_XSS
 
 	// ARM features:
 	AESARM // AES instructions
@@ -309,10 +314,11 @@ type CPUInfo struct {
 		L2 int // L2 Cache (per core or shared). Will be -1 if undetected
 		L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
 	}
-	SGX        SGXSupport
-	AVX10Level uint8
-	maxFunc    uint32
-	maxExFunc  uint32
+	SGX              SGXSupport
+	AMDMemEncryption AMDMemEncryptionSupport
+	AVX10Level       uint8
+	maxFunc          uint32
+	maxExFunc        uint32
 }
 
 var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
@@ -1079,6 +1085,32 @@ func hasSGX(available, lc bool) (rval SGXSupport) {
 	return
 }
 
+type AMDMemEncryptionSupport struct {
+	Available          bool
+	CBitPossition      uint32
+	NumVMPL            uint32
+	PhysAddrReduction  uint32
+	NumEntryptedGuests uint32
+	MinSevNoEsAsid     uint32
+}
+
+func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) {
+	rval.Available = available
+	if !available {
+		return
+	}
+
+	_, b, c, d := cpuidex(0x8000001f, 0)
+
+	rval.CBitPossition = b & 0x3f
+	rval.PhysAddrReduction = (b >> 6) & 0x3F
+	rval.NumVMPL = (b >> 12) & 0xf
+	rval.NumEntryptedGuests = c
+	rval.MinSevNoEsAsid = d
+
+	return
+}
+
 func support() flagSet {
 	var fs flagSet
 	mfi := maxFunctionID()
@@ -1418,6 +1450,15 @@ func support() flagSet {
 		fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
 	}
 
+	if maxExtendedFunction() >= 0x80000021 && vend == AMD {
+		a, _, _, _ := cpuid(0x80000021)
+		fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX)
+		fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO)
+		fs.setIf((a>>29)&1 == 1, SRSO_NO)
+		fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE)
+		fs.setIf((a>>27)&1 == 1, SBPB)
+	}
+
 	if mfi >= 0x20 {
 		// Microsoft has decided to purposefully hide the information
 		// of the guest TEE when VMs are being created using Hyper-V.
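The cpuid hunks above extend the library with AMD memory-encryption reporting (decoded from CPUID leaf 0x8000001f) and the SRSO/IBPB_BRTYPE/SBPB speculation-control bits (decoded from CPUID leaf 0x80000021). A minimal sketch of how a consumer might read the new information; `cpuid.CPU` and `Supports` are existing public entry points of `klauspost/cpuid/v2`, and the field and feature names are taken from the diff itself (the `CBitPossition` and `NumEntryptedGuests` spellings are the library's own):

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// AMDMemEncryption is populated by hasAMDMemEncryption when the CPU
	// reports SME or SEV (wired up in detect_x86.go, next hunk).
	enc := cpuid.CPU.AMDMemEncryption
	if enc.Available {
		fmt.Printf("C-bit: %d, VMPLs: %d, encrypted guests: %d, min SEV (non-ES) ASID: %d\n",
			enc.CBitPossition, enc.NumVMPL, enc.NumEntryptedGuests, enc.MinSevNoEsAsid)
	}

	// The new speculation-control feature bits from CPUID 0x80000021 EAX.
	for _, f := range []cpuid.FeatureID{cpuid.SRSO_NO, cpuid.SRSO_USER_KERNEL_NO,
		cpuid.SRSO_MSR_FIX, cpuid.IBPB_BRTYPE, cpuid.SBPB} {
		fmt.Printf("%-20s %v\n", f.String(), cpuid.CPU.Supports(f))
	}
}
```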
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index c7dfa125de4..799b400c2ec 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -27,6 +27,7 @@ func addInfo(c *CPUInfo, safe bool) { c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) c.ThreadsPerCore = threadsPerCore() c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 43bd05f5168..57a085a53bf 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -81,152 +81,157 @@ func _() { _ = x[IA32_ARCH_CAP-71] _ = x[IA32_CORE_CAP-72] _ = x[IBPB-73] - _ = x[IBRS-74] - _ = x[IBRS_PREFERRED-75] - _ = x[IBRS_PROVIDES_SMP-76] - _ = x[IBS-77] - _ = x[IBSBRNTRGT-78] - _ = x[IBSFETCHSAM-79] - _ = x[IBSFFV-80] - _ = x[IBSOPCNT-81] - _ = x[IBSOPCNTEXT-82] - _ = x[IBSOPSAM-83] - _ = x[IBSRDWROPCNT-84] - _ = x[IBSRIPINVALIDCHK-85] - _ = x[IBS_FETCH_CTLX-86] - _ = x[IBS_OPDATA4-87] - _ = x[IBS_OPFUSE-88] - _ = x[IBS_PREVENTHOST-89] - _ = x[IBS_ZEN4-90] - _ = x[IDPRED_CTRL-91] - _ = x[INT_WBINVD-92] - _ = x[INVLPGB-93] - _ = x[KEYLOCKER-94] - _ = x[KEYLOCKERW-95] - _ = x[LAHF-96] - _ = x[LAM-97] - _ = x[LBRVIRT-98] - _ = x[LZCNT-99] - _ = x[MCAOVERFLOW-100] - _ = x[MCDT_NO-101] - _ = x[MCOMMIT-102] - _ = x[MD_CLEAR-103] - _ = x[MMX-104] - _ = x[MMXEXT-105] - _ = x[MOVBE-106] - _ = x[MOVDIR64B-107] - _ = x[MOVDIRI-108] - _ = x[MOVSB_ZL-109] - _ = x[MOVU-110] - _ = x[MPX-111] - _ = x[MSRIRC-112] - _ = x[MSRLIST-113] - _ = x[MSR_PAGEFLUSH-114] - _ = x[NRIPS-115] - _ = x[NX-116] - _ = x[OSXSAVE-117] - _ = x[PCONFIG-118] - _ = x[POPCNT-119] - _ = x[PPIN-120] - _ = x[PREFETCHI-121] - _ = x[PSFD-122] - _ = x[RDPRU-123] - _ = x[RDRAND-124] - _ = x[RDSEED-125] - _ = x[RDTSCP-126] - _ = x[RRSBA_CTRL-127] - _ = x[RTM-128] - _ = x[RTM_ALWAYS_ABORT-129] - _ = x[SERIALIZE-130] - _ = x[SEV-131] - _ = x[SEV_64BIT-132] - _ = x[SEV_ALTERNATIVE-133] - _ = x[SEV_DEBUGSWAP-134] - _ = x[SEV_ES-135] - _ = x[SEV_RESTRICTED-136] - _ = x[SEV_SNP-137] - _ = x[SGX-138] - _ = x[SGXLC-139] - _ = x[SHA-140] - _ = x[SME-141] - _ = x[SME_COHERENT-142] - _ = x[SPEC_CTRL_SSBD-143] - _ = x[SRBDS_CTRL-144] - _ = x[SSE-145] - _ = x[SSE2-146] - _ = x[SSE3-147] - _ = x[SSE4-148] - _ = x[SSE42-149] - _ = x[SSE4A-150] - _ = x[SSSE3-151] - _ = x[STIBP-152] - _ = x[STIBP_ALWAYSON-153] - _ = x[STOSB_SHORT-154] - _ = x[SUCCOR-155] - _ = x[SVM-156] - _ = x[SVMDA-157] - _ = x[SVMFBASID-158] - _ = x[SVML-159] - _ = x[SVMNP-160] - _ = x[SVMPF-161] - _ = x[SVMPFT-162] - _ = x[SYSCALL-163] - _ = x[SYSEE-164] - _ = x[TBM-165] - _ = x[TDX_GUEST-166] - _ = x[TLB_FLUSH_NESTED-167] - _ = x[TME-168] - _ = x[TOPEXT-169] - _ = x[TSCRATEMSR-170] - _ = x[TSXLDTRK-171] - _ = x[VAES-172] - _ = x[VMCBCLEAN-173] - _ = x[VMPL-174] - _ = x[VMSA_REGPROT-175] - _ = x[VMX-176] - _ = x[VPCLMULQDQ-177] - _ = x[VTE-178] - _ = x[WAITPKG-179] - _ = x[WBNOINVD-180] - _ = x[WRMSRNS-181] - _ = x[X87-182] - _ = x[XGETBV1-183] - _ = x[XOP-184] - _ = x[XSAVE-185] - _ = x[XSAVEC-186] - _ = x[XSAVEOPT-187] - _ = x[XSAVES-188] - _ = x[AESARM-189] - _ = x[ARMCPUID-190] - _ = x[ASIMD-191] - _ = 
x[ASIMDDP-192] - _ = x[ASIMDHP-193] - _ = x[ASIMDRDM-194] - _ = x[ATOMICS-195] - _ = x[CRC32-196] - _ = x[DCPOP-197] - _ = x[EVTSTRM-198] - _ = x[FCMA-199] - _ = x[FP-200] - _ = x[FPHP-201] - _ = x[GPA-202] - _ = x[JSCVT-203] - _ = x[LRCPC-204] - _ = x[PMULL-205] - _ = x[SHA1-206] - _ = x[SHA2-207] - _ = x[SHA3-208] - _ = x[SHA512-209] - _ = x[SM3-210] - _ = x[SM4-211] - _ = x[SVE-212] - _ = x[lastID-213] + _ = x[IBPB_BRTYPE-74] + _ = x[IBRS-75] + _ = x[IBRS_PREFERRED-76] + _ = x[IBRS_PROVIDES_SMP-77] + _ = x[IBS-78] + _ = x[IBSBRNTRGT-79] + _ = x[IBSFETCHSAM-80] + _ = x[IBSFFV-81] + _ = x[IBSOPCNT-82] + _ = x[IBSOPCNTEXT-83] + _ = x[IBSOPSAM-84] + _ = x[IBSRDWROPCNT-85] + _ = x[IBSRIPINVALIDCHK-86] + _ = x[IBS_FETCH_CTLX-87] + _ = x[IBS_OPDATA4-88] + _ = x[IBS_OPFUSE-89] + _ = x[IBS_PREVENTHOST-90] + _ = x[IBS_ZEN4-91] + _ = x[IDPRED_CTRL-92] + _ = x[INT_WBINVD-93] + _ = x[INVLPGB-94] + _ = x[KEYLOCKER-95] + _ = x[KEYLOCKERW-96] + _ = x[LAHF-97] + _ = x[LAM-98] + _ = x[LBRVIRT-99] + _ = x[LZCNT-100] + _ = x[MCAOVERFLOW-101] + _ = x[MCDT_NO-102] + _ = x[MCOMMIT-103] + _ = x[MD_CLEAR-104] + _ = x[MMX-105] + _ = x[MMXEXT-106] + _ = x[MOVBE-107] + _ = x[MOVDIR64B-108] + _ = x[MOVDIRI-109] + _ = x[MOVSB_ZL-110] + _ = x[MOVU-111] + _ = x[MPX-112] + _ = x[MSRIRC-113] + _ = x[MSRLIST-114] + _ = x[MSR_PAGEFLUSH-115] + _ = x[NRIPS-116] + _ = x[NX-117] + _ = x[OSXSAVE-118] + _ = x[PCONFIG-119] + _ = x[POPCNT-120] + _ = x[PPIN-121] + _ = x[PREFETCHI-122] + _ = x[PSFD-123] + _ = x[RDPRU-124] + _ = x[RDRAND-125] + _ = x[RDSEED-126] + _ = x[RDTSCP-127] + _ = x[RRSBA_CTRL-128] + _ = x[RTM-129] + _ = x[RTM_ALWAYS_ABORT-130] + _ = x[SBPB-131] + _ = x[SERIALIZE-132] + _ = x[SEV-133] + _ = x[SEV_64BIT-134] + _ = x[SEV_ALTERNATIVE-135] + _ = x[SEV_DEBUGSWAP-136] + _ = x[SEV_ES-137] + _ = x[SEV_RESTRICTED-138] + _ = x[SEV_SNP-139] + _ = x[SGX-140] + _ = x[SGXLC-141] + _ = x[SHA-142] + _ = x[SME-143] + _ = x[SME_COHERENT-144] + _ = x[SPEC_CTRL_SSBD-145] + _ = x[SRBDS_CTRL-146] + _ = x[SRSO_MSR_FIX-147] + _ = x[SRSO_NO-148] + _ = x[SRSO_USER_KERNEL_NO-149] + _ = x[SSE-150] + _ = x[SSE2-151] + _ = x[SSE3-152] + _ = x[SSE4-153] + _ = x[SSE42-154] + _ = x[SSE4A-155] + _ = x[SSSE3-156] + _ = x[STIBP-157] + _ = x[STIBP_ALWAYSON-158] + _ = x[STOSB_SHORT-159] + _ = x[SUCCOR-160] + _ = x[SVM-161] + _ = x[SVMDA-162] + _ = x[SVMFBASID-163] + _ = x[SVML-164] + _ = x[SVMNP-165] + _ = x[SVMPF-166] + _ = x[SVMPFT-167] + _ = x[SYSCALL-168] + _ = x[SYSEE-169] + _ = x[TBM-170] + _ = x[TDX_GUEST-171] + _ = x[TLB_FLUSH_NESTED-172] + _ = x[TME-173] + _ = x[TOPEXT-174] + _ = x[TSCRATEMSR-175] + _ = x[TSXLDTRK-176] + _ = x[VAES-177] + _ = x[VMCBCLEAN-178] + _ = x[VMPL-179] + _ = x[VMSA_REGPROT-180] + _ = x[VMX-181] + _ = x[VPCLMULQDQ-182] + _ = x[VTE-183] + _ = x[WAITPKG-184] + _ = x[WBNOINVD-185] + _ = x[WRMSRNS-186] + _ = x[X87-187] + _ = x[XGETBV1-188] + _ = x[XOP-189] + _ = x[XSAVE-190] + _ = x[XSAVEC-191] + _ = x[XSAVEOPT-192] + _ = x[XSAVES-193] + _ = x[AESARM-194] + _ = x[ARMCPUID-195] + _ = x[ASIMD-196] + _ = x[ASIMDDP-197] + _ = x[ASIMDHP-198] + _ = x[ASIMDRDM-199] + _ = x[ATOMICS-200] + _ = x[CRC32-201] + _ = x[DCPOP-202] + _ = x[EVTSTRM-203] + _ = x[FCMA-204] + _ = x[FP-205] + _ = x[FPHP-206] + _ = x[GPA-207] + _ = x[JSCVT-208] + _ = x[LRCPC-209] + _ = x[PMULL-210] + _ = x[SHA1-211] + _ = x[SHA2-212] + _ = x[SHA3-213] + _ = x[SHA512-214] + _ = x[SM3-215] + _ = x[SM4-216] + _ = x[SVE-217] + _ = x[lastID-218] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 554, 568, 585, 588, 598, 609, 615, 
623, 634, 642, 654, 670, 684, 695, 705, 720, 728, 739, 749, 756, 765, 775, 779, 782, 789, 794, 805, 812, 819, 827, 830, 836, 841, 850, 857, 865, 869, 872, 878, 885, 898, 903, 905, 912, 919, 925, 929, 938, 942, 947, 953, 959, 965, 975, 978, 994, 1003, 1006, 1015, 1030, 1043, 1049, 1063, 1070, 1073, 1078, 1081, 1084, 1096, 1110, 1120, 1123, 1127, 1131, 1135, 1140, 1145, 1150, 1155, 1169, 1180, 1186, 1189, 1194, 1203, 1207, 1212, 1217, 1223, 1230, 1235, 1238, 1247, 1263, 1266, 1272, 1282, 1290, 1294, 1303, 1307, 1319, 1322, 1332, 1335, 1342, 1350, 1357, 1360, 1367, 1370, 1375, 1381, 1389, 1395, 1401, 1409, 1414, 1421, 1428, 1436, 1443, 1448, 1453, 1460, 1464, 1466, 1470, 1473, 1478, 1483, 1488, 1492, 1496, 1500, 1506, 1509, 1512, 1515, 1521} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 561, 565, 579, 596, 599, 609, 620, 626, 634, 645, 653, 665, 681, 695, 706, 716, 731, 739, 750, 760, 767, 776, 786, 790, 793, 800, 805, 816, 823, 830, 838, 841, 847, 852, 861, 868, 876, 880, 883, 889, 896, 909, 914, 916, 923, 930, 936, 940, 949, 953, 958, 964, 970, 976, 986, 989, 1005, 1009, 1018, 1021, 1030, 1045, 1058, 1064, 1078, 1085, 1088, 1093, 1096, 1099, 1111, 1125, 1135, 1147, 1154, 1173, 1176, 1180, 1184, 1188, 1193, 1198, 1203, 1208, 1222, 1233, 1239, 1242, 1247, 1256, 1260, 1265, 1270, 1276, 1283, 1288, 1291, 1300, 1316, 1319, 1325, 1335, 1343, 1347, 1356, 1360, 1372, 1375, 1385, 1388, 1395, 1403, 1410, 1413, 1420, 1423, 1428, 1434, 1442, 1448, 1454, 1462, 1467, 1474, 1481, 1489, 1496, 1501, 1506, 1513, 1517, 1519, 1523, 1526, 1531, 1536, 1541, 1545, 1549, 1553, 1559, 1562, 1565, 1568, 1574} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 8c12c355c94..bb595626e6a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -119,7 +119,7 @@ func (opts CopyDestOptions) Marshal(header http.Header) { if opts.ReplaceMetadata { header.Set("x-amz-metadata-directive", replaceDirective) for k, v := range filterCustomMeta(opts.UserMetadata) { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 4dec6040dfd..a96de9b9f6a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -106,7 +106,11 @@ func (opts *PutObjectOptions) SetMatchETag(etag string) { if opts.customHeaders == nil { opts.customHeaders = http.Header{} } - opts.customHeaders.Set("If-Match", "\""+etag+"\"") + if etag == "*" { + opts.customHeaders.Set("If-Match", "*") + } else { + opts.customHeaders.Set("If-Match", "\""+etag+"\"") + } } // SetMatchETagExcept if etag does not match while PUT MinIO returns an @@ -116,7 +120,11 @@ func (opts 
*PutObjectOptions) SetMatchETagExcept(etag string) { if opts.customHeaders == nil { opts.customHeaders = http.Header{} } - opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + if etag == "*" { + opts.customHeaders.Set("If-None-Match", "*") + } else { + opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + } } // getNumThreads - gets the number of threads to be used in the multipart @@ -212,7 +220,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { } for k, v := range opts.UserMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) @@ -230,7 +238,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { // validate() checks if the UserMetadata map has standard headers or and raises an error if so. func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { return errInvalidArgument(k + " unsupported user defined metadata name") } if !httpguts.ValidHeaderFieldValue(v) { diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index be60529df33..eaaaa68c22a 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -129,7 +129,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.70" + libraryVersion = "v7.0.72" ) // User Agent should always following the below style. diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index b1979e320e7..e77bf9d4abe 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -1216,6 +1216,130 @@ func testPutObjectWithVersioning() { logSuccess(testName, function, args, startTime) } +func testListMultipartUpload() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Instantiate new minio client object. + opts := &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + } + c, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + core, err := minio.NewCore(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ ctx := context.Background() + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer func() { + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + } + }() + objName := "prefix/objectName" + + want := minio.ListMultipartUploadsResult{ + Bucket: bucketName, + KeyMarker: "", + UploadIDMarker: "", + NextKeyMarker: "", + NextUploadIDMarker: "", + EncodingType: "url", + MaxUploads: 1000, + IsTruncated: false, + Prefix: "prefix/objectName", + Delimiter: "/", + CommonPrefixes: nil, + } + for i := 0; i < 5; i++ { + uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload failed", err) + return + } + want.Uploads = append(want.Uploads, minio.ObjectMultipartInfo{ + Initiated: time.Time{}, + StorageClass: "", + Key: objName, + Size: 0, + UploadID: uid, + Err: nil, + }) + + for j := 0; j < 5; j++ { + cmpGot := func(call string, got minio.ListMultipartUploadsResult) bool { + for i := range got.Uploads { + got.Uploads[i].Initiated = time.Time{} + } + if !reflect.DeepEqual(want, got) { + err := fmt.Errorf("want: %#v\ngot : %#v", want, got) + logError(testName, function, args, startTime, "", call+" failed", err) + } + return true + } + got, err := core.ListMultipartUploads(ctx, bucketName, objName, "", "", "/", 1000) + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-prefix", got) { + return + } + got, err = core.ListMultipartUploads(ctx, bucketName, objName, objName, "", "/", 1000) + got.KeyMarker = "" + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-marker", got) { + return + } + } + if i > 2 { + err = core.AbortMultipartUpload(ctx, bucketName, objName, uid) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + want.Uploads = want.Uploads[:len(want.Uploads)-1] + } + } + for _, up := range want.Uploads { + err = core.AbortMultipartUpload(ctx, bucketName, objName, up.UploadID) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + } + logSuccess(testName, function, args, startTime) +} + func testCopyObjectWithVersioning() { // initialize logging params startTime := time.Now() @@ -13536,6 +13660,7 @@ func main() { // execute tests if isFullMode() { + testListMultipartUpload() testGetObjectAttributes() testGetObjectAttributesErrorCases() testMakeBucketErrorV2() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index 10c95ffe52d..e706b57de66 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -414,11 +414,32 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e return en.EncodeElement(expirationWrapper(e), startElement) } +// DelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy +type DelMarkerExpiration struct { + XMLName xml.Name `xml:"DelMarkerExpiration" 
json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` +} + +// IsNull returns true if Days isn't specified and false otherwise. +func (de DelMarkerExpiration) IsNull() bool { + return de.Days == 0 +} + +// MarshalXML avoids serializing an empty DelMarkerExpiration element +func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if de.IsNull() { + return nil + } + type delMarkerExp DelMarkerExpiration + return enc.EncodeElement(delMarkerExp(de), start) +} + // MarshalJSON customizes json encoding by omitting empty values func (r Rule) MarshalJSON() ([]byte, error) { type rule struct { AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` Expiration *Expiration `json:"Expiration,omitempty"` + DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` ID string `json:"ID"` RuleFilter *Filter `json:"Filter,omitempty"` NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` @@ -442,6 +463,9 @@ func (r Rule) MarshalJSON() ([]byte, error) { if !r.Expiration.IsNull() { newr.Expiration = &r.Expiration } + if !r.DelMarkerExpiration.IsNull() { + newr.DelMarkerExpiration = &r.DelMarkerExpiration + } if !r.Transition.IsNull() { newr.Transition = &r.Transition } @@ -460,6 +484,7 @@ type Rule struct { XMLName xml.Name `xml:"Rule,omitempty" json:"-"` AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` ID string `xml:"ID" json:"ID"` RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index a44799d246a..151ca21e88f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -50,6 +50,7 @@ const ( ObjectRemovedAll EventType = "s3:ObjectRemoved:*" ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ILMDelMarkerExpirationDelete EventType = "s3:LifecycleDelMarkerExpiration:Delete" ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" ObjectTransitionAll EventType = "s3:ObjectTransition:*" ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 3f4881e82c1..f6dbbf7f6ea 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -19,12 +19,14 @@ package minio import ( "encoding/base64" + "errors" "fmt" "net/http" "strings" "time" "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/tags" ) // expirationDateFormat date format for expiration key in json policy. 
@@ -152,6 +154,27 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error { return errInvalidArgument("Invalid condition in policy") } +// SetTagging - Sets tagging for the object for this policy based upload. +func (p *PostPolicy) SetTagging(tagging string) error { + if strings.TrimSpace(tagging) == "" || tagging == "" { + return errInvalidArgument("No tagging specified.") + } + _, err := tags.ParseObjectXML(strings.NewReader(tagging)) + if err != nil { + return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$tagging", + value: tagging, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["tagging"] = tagging + return nil +} + // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 252f452231d..a5beb371f2c 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -510,19 +510,9 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } -var supportedReplicationEncryptionHeaders = map[string]bool{ - "x-minio-replication-server-side-encryption-sealed-key": true, - "x-minio-replication-server-side-encryption-seal-algorithm": true, - "x-minio-replication-server-side-encryption-iv": true, - "x-minio-replication-encrypted-multipart": true, - "x-minio-replication-actual-object-size": true, - // Add more supported headers here. - // Must be lower case. -} - -// isValidReplicationEncryptionHeader returns true if header is one of valid replication encryption headers -func isValidReplicationEncryptionHeader(headerKey string) bool { - return supportedReplicationEncryptionHeaders[strings.ToLower(headerKey)] +// isMinioHeader returns true if header is x-minio- header. +func isMinioHeader(headerKey string) bool { + return strings.HasPrefix(strings.ToLower(headerKey), "x-minio-") } // supportedQueryValues is a list of query strings that can be passed in when using GetObject. diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go index 3da4854d048..7588da55561 100644 --- a/vendor/github.com/prometheus/common/config/config.go +++ b/vendor/github.com/prometheus/common/config/config.go @@ -60,9 +60,9 @@ func (s Secret) MarshalJSON() ([]byte, error) { return json.Marshal(secretToken) } -type Header map[string][]Secret +type ProxyHeader map[string][]Secret -func (h *Header) HTTPHeader() http.Header { +func (h *ProxyHeader) HTTPHeader() http.Header { if h == nil || *h == nil { return nil } diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go new file mode 100644 index 00000000000..4a0be4a10e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -0,0 +1,140 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package no longer handles safe yaml parsing. In order to +// ensure correct yaml unmarshalling, use "yaml.UnmarshalStrict()". + +package config + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "strings" +) + +// reservedHeaders that change the connection, are set by Prometheus, or can +// be changed otherwise. +var reservedHeaders = map[string]struct{}{ + "Authorization": {}, + "Host": {}, + "Content-Encoding": {}, + "Content-Length": {}, + "Content-Type": {}, + "User-Agent": {}, + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Www-Authenticate": {}, + "Accept-Encoding": {}, + "X-Prometheus-Remote-Write-Version": {}, + "X-Prometheus-Remote-Read-Version": {}, + "X-Prometheus-Scrape-Timeout-Seconds": {}, + + // Added by SigV4. + "X-Amz-Date": {}, + "X-Amz-Security-Token": {}, + "X-Amz-Content-Sha256": {}, +} + +// Headers represents the configuration for HTTP headers. +type Headers struct { + Headers map[string]Header `yaml:",inline"` + dir string +} + +// Header represents the configuration for a single HTTP header. +type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +func (h Headers) MarshalJSON() ([]byte, error) { + // Inline the Headers map when serializing JSON because json encoder doesn't support "inline" directive. + return json.Marshal(h.Headers) +} + +// SetDirectory records the directory to make headers file relative to the +// configuration file. +func (h *Headers) SetDirectory(dir string) { + if h == nil { + return + } + h.dir = dir +} + +// Validate validates the Headers config. +func (h *Headers) Validate() error { + for n, header := range h.Headers { + if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { + return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) + } + for _, v := range header.Files { + f := JoinDir(h.dir, v) + _, err := os.ReadFile(f) + if err != nil { + return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) + } + } + } + return nil +} + +// NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on +// requests as configured. +func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { + if len(config.Headers) == 0 { + return next + } + return &headersRoundTripper{ + config: config, + next: next, + } +} + +type headersRoundTripper struct { + next http.RoundTripper + config *Headers +} + +// RoundTrip implements http.RoundTripper. 
+func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + for n, h := range rt.config.Headers { + for _, v := range h.Values { + req.Header.Add(n, v) + } + for _, v := range h.Secrets { + req.Header.Add(n, string(v)) + } + for _, v := range h.Files { + f := JoinDir(rt.config.dir, v) + b, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + } + req.Header.Add(n, strings.TrimSpace(string(b))) + } + } + return rt.next.RoundTrip(req) +} + +// CloseIdleConnections implements closeIdler. +func (rt *headersRoundTripper) CloseIdleConnections() { + if ci, ok := rt.next.(closeIdler); ok { + ci.CloseIdleConnections() + } +} diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 20441818405..3e320134776 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "net" "net/http" @@ -130,8 +131,12 @@ func (tv *TLSVersion) String() string { type BasicAuth struct { Username string `yaml:"username" json:"username"` UsernameFile string `yaml:"username_file,omitempty" json:"username_file,omitempty"` + // UsernameRef is the name of the secret within the secret manager to use as the username. + UsernameRef string `yaml:"username_ref,omitempty" json:"username_ref,omitempty"` Password Secret `yaml:"password,omitempty" json:"password,omitempty"` PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"` + // PasswordRef is the name of the secret within the secret manager to use as the password. + PasswordRef string `yaml:"password_ref,omitempty" json:"password_ref,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -148,6 +153,8 @@ type Authorization struct { Type string `yaml:"type,omitempty" json:"type,omitempty"` Credentials Secret `yaml:"credentials,omitempty" json:"credentials,omitempty"` CredentialsFile string `yaml:"credentials_file,omitempty" json:"credentials_file,omitempty"` + // CredentialsRef is the name of the secret within the secret manager to use as credentials. + CredentialsRef string `yaml:"credentials_ref,omitempty" json:"credentials_ref,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -224,14 +231,17 @@ func (u URL) MarshalJSON() ([]byte, error) { // OAuth2 is the oauth2 client configuration. type OAuth2 struct { - ClientID string `yaml:"client_id" json:"client_id"` - ClientSecret Secret `yaml:"client_secret" json:"client_secret"` - ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"` - Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` - TokenURL string `yaml:"token_url" json:"token_url"` - EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` - TLSConfig TLSConfig `yaml:"tls_config,omitempty"` - ProxyConfig `yaml:",inline"` + ClientID string `yaml:"client_id" json:"client_id"` + ClientSecret Secret `yaml:"client_secret" json:"client_secret"` + ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"` + // ClientSecretRef is the name of the secret within the secret manager to use as the client + // secret. 
+ ClientSecretRef string `yaml:"client_secret_ref" json:"client_secret_ref"` + Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` + TokenURL string `yaml:"token_url" json:"token_url"` + EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + TLSConfig TLSConfig `yaml:"tls_config,omitempty"` + ProxyConfig `yaml:",inline"` } // UnmarshalYAML implements the yaml.Unmarshaler interface @@ -253,12 +263,12 @@ func (o *OAuth2) UnmarshalJSON(data []byte) error { } // SetDirectory joins any relative file paths with dir. -func (a *OAuth2) SetDirectory(dir string) { - if a == nil { +func (o *OAuth2) SetDirectory(dir string) { + if o == nil { return } - a.ClientSecretFile = JoinDir(dir, a.ClientSecretFile) - a.TLSConfig.SetDirectory(dir) + o.ClientSecretFile = JoinDir(dir, o.ClientSecretFile) + o.TLSConfig.SetDirectory(dir) } // LoadHTTPConfig parses the YAML input s into a HTTPClientConfig. @@ -311,6 +321,9 @@ type HTTPClientConfig struct { EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"` // Proxy configuration. ProxyConfig `yaml:",inline"` + // HTTPHeaders specify headers to inject in the requests. Those headers + // could be marshalled back to the users. + HTTPHeaders *Headers `yaml:"http_headers,omitempty" json:"http_headers,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -322,9 +335,22 @@ func (c *HTTPClientConfig) SetDirectory(dir string) { c.BasicAuth.SetDirectory(dir) c.Authorization.SetDirectory(dir) c.OAuth2.SetDirectory(dir) + c.HTTPHeaders.SetDirectory(dir) c.BearerTokenFile = JoinDir(dir, c.BearerTokenFile) } +// nonZeroCount returns the amount of values that are non-zero. +func nonZeroCount[T comparable](values ...T) int { + count := 0 + var zero T + for _, value := range values { + if value != zero { + count += 1 + } + } + return count +} + // Validate validates the HTTPClientConfig to check only one of BearerToken, // BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL // is set if ProxyConnectHeader is set. 
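The new http_headers block above lets HTTP client configurations inject extra headers, with reserved names rejected by Headers.Validate. A minimal sketch of building such a client programmatically; the header name and tenant value are invented for illustration:

package main

import (
	"log"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.HTTPClientConfig{
		HTTPHeaders: &config.Headers{
			Headers: map[string]config.Header{
				// "X-Scope-OrgID" is an arbitrary, non-reserved header.
				"X-Scope-OrgID": {Values: []string{"tenant-1"}},
			},
		},
	}
	// NewClientFromConfig runs Validate, so reserved names such as
	// Authorization or User-Agent would be rejected here.
	client, err := config.NewClientFromConfig(cfg, "example")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("http://localhost:9090/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}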
@@ -336,17 +362,17 @@ func (c *HTTPClientConfig) Validate() error { if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") } - if c.BasicAuth != nil && (string(c.BasicAuth.Username) != "" && c.BasicAuth.UsernameFile != "") { - return fmt.Errorf("at most one of basic_auth username & username_file must be configured") + if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 { + return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured") } - if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") { - return fmt.Errorf("at most one of basic_auth password & password_file must be configured") + if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 { + return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured") } if c.Authorization != nil { if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 { return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file") } - if string(c.Authorization.Credentials) != "" && c.Authorization.CredentialsFile != "" { + if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 { return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured") } c.Authorization.Type = strings.TrimSpace(c.Authorization.Type) @@ -381,13 +407,18 @@ func (c *HTTPClientConfig) Validate() error { if len(c.OAuth2.TokenURL) == 0 { return fmt.Errorf("oauth2 token_url must be configured") } - if len(c.OAuth2.ClientSecret) > 0 && len(c.OAuth2.ClientSecretFile) > 0 { - return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured") + if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 { + return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") } } if err := c.ProxyConfig.Validate(); err != nil { return err } + if c.HTTPHeaders != nil { + if err := c.HTTPHeaders.Validate(); err != nil { + return err + } + } return nil } @@ -428,50 +459,78 @@ type httpClientOptions struct { idleConnTimeout time.Duration userAgent string host string + secretManager SecretManager } // HTTPClientOption defines an option that can be applied to the HTTP client. -type HTTPClientOption func(options *httpClientOptions) +type HTTPClientOption interface { + applyToHTTPClientOptions(options *httpClientOptions) +} + +type httpClientOptionFunc func(options *httpClientOptions) + +func (f httpClientOptionFunc) applyToHTTPClientOptions(options *httpClientOptions) { + f(options) +} // WithDialContextFunc allows you to override func gets used for the actual dialing. The default is `net.Dialer.DialContext`. func WithDialContextFunc(fn DialContextFunc) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.dialContextFunc = fn - } + }) } // WithKeepAlivesDisabled allows to disable HTTP keepalive. 
func WithKeepAlivesDisabled() HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.keepAlivesEnabled = false - } + }) } // WithHTTP2Disabled allows to disable HTTP2. func WithHTTP2Disabled() HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.http2Enabled = false - } + }) } // WithIdleConnTimeout allows setting the idle connection timeout. func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.idleConnTimeout = timeout - } + }) } // WithUserAgent allows setting the user agent. func WithUserAgent(ua string) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.userAgent = ua - } + }) } // WithHost allows setting the host header. func WithHost(host string) HTTPClientOption { - return func(opts *httpClientOptions) { + return httpClientOptionFunc(func(opts *httpClientOptions) { opts.host = host + }) +} + +type secretManagerOption struct { + secretManager SecretManager +} + +func (s *secretManagerOption) applyToHTTPClientOptions(opts *httpClientOptions) { + opts.secretManager = s.secretManager +} + +func (s *secretManagerOption) applyToTLSConfigOptions(opts *tlsConfigOptions) { + opts.secretManager = s.secretManager +} + +// WithSecretManager allows setting the secret manager. +func WithSecretManager(manager SecretManager) *secretManagerOption { + return &secretManagerOption{ + secretManager: manager, } } @@ -501,9 +560,16 @@ func NewClientFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClie // given config.HTTPClientConfig and config.HTTPClientOption. // The name is used as go-conntrack metric label. func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) { + return NewRoundTripperFromConfigWithContext(context.Background(), cfg, name, optFuncs...) +} + +// NewRoundTripperFromConfigWithContext returns a new HTTP RoundTripper configured for the +// given config.HTTPClientConfig and config.HTTPClientOption. +// The name is used as go-conntrack metric label. +func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) { opts := defaultHTTPClientOptions - for _, f := range optFuncs { - f(&opts) + for _, opt := range optFuncs { + opt.applyToHTTPClientOptions(&opts) } var dialContext func(ctx context.Context, network, addr string) (net.Conn, error) @@ -551,25 +617,45 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT // If a authorization_credentials is provided, create a round tripper that will set the // Authorization header correctly on each request. 
- if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 { - rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt) - } else if cfg.Authorization != nil { - rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt) + if cfg.Authorization != nil { + credentialsSecret, err := toSecret(opts.secretManager, cfg.Authorization.Credentials, cfg.Authorization.CredentialsFile, cfg.Authorization.CredentialsRef) + if err != nil { + return nil, fmt.Errorf("unable to use credentials: %w", err) + } + rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, credentialsSecret, rt) } // Backwards compatibility, be nice with importers who would not have // called Validate(). - if len(cfg.BearerToken) > 0 { - rt = NewAuthorizationCredentialsRoundTripper("Bearer", cfg.BearerToken, rt) - } else if len(cfg.BearerTokenFile) > 0 { - rt = NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.BearerTokenFile, rt) + if len(cfg.BearerToken) > 0 || len(cfg.BearerTokenFile) > 0 { + bearerSecret, err := toSecret(opts.secretManager, cfg.BearerToken, cfg.BearerTokenFile, "") + if err != nil { + return nil, fmt.Errorf("unable to use bearer token: %w", err) + } + rt = NewAuthorizationCredentialsRoundTripper("Bearer", bearerSecret, rt) } if cfg.BasicAuth != nil { - rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.UsernameFile, cfg.BasicAuth.PasswordFile, rt) + usernameSecret, err := toSecret(opts.secretManager, Secret(cfg.BasicAuth.Username), cfg.BasicAuth.UsernameFile, cfg.BasicAuth.UsernameRef) + if err != nil { + return nil, fmt.Errorf("unable to use username: %w", err) + } + passwordSecret, err := toSecret(opts.secretManager, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, cfg.BasicAuth.PasswordRef) + if err != nil { + return nil, fmt.Errorf("unable to use password: %w", err) + } + rt = NewBasicAuthRoundTripper(usernameSecret, passwordSecret, rt) } if cfg.OAuth2 != nil { - rt = NewOAuth2RoundTripper(cfg.OAuth2, rt, &opts) + clientSecret, err := toSecret(opts.secretManager, cfg.OAuth2.ClientSecret, cfg.OAuth2.ClientSecretFile, cfg.OAuth2.ClientSecretRef) + if err != nil { + return nil, fmt.Errorf("unable to use client secret: %w", err) + } + rt = NewOAuth2RoundTripper(clientSecret, cfg.OAuth2, rt, &opts) + } + + if cfg.HTTPHeaders != nil { + rt = NewHeadersRoundTripper(cfg.HTTPHeaders, rt) } if opts.userAgent != "" { @@ -584,115 +670,187 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT return rt, nil } - tlsConfig, err := NewTLSConfig(&cfg.TLSConfig) + tlsConfig, err := NewTLSConfig(&cfg.TLSConfig, WithSecretManager(opts.secretManager)) if err != nil { return nil, err } - if len(cfg.TLSConfig.CAFile) == 0 { + tlsSettings, err := cfg.TLSConfig.roundTripperSettings(opts.secretManager) + if err != nil { + return nil, err + } + if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { // No need for a RoundTripper that reloads the CA file automatically. return newRT(tlsConfig) } - return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.roundTripperSettings(), newRT) + return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) } -type authorizationCredentialsRoundTripper struct { - authType string - authCredentials Secret - rt http.RoundTripper +// SecretManager manages secret data mapped to names known as "references" or "refs". 
+type SecretManager interface { + // Fetch returns the secret data given a secret name indicated by `secretRef`. + Fetch(ctx context.Context, secretRef string) (string, error) } -// NewAuthorizationCredentialsRoundTripper adds the provided credentials to a -// request unless the authorization header has already been set. -func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials Secret, rt http.RoundTripper) http.RoundTripper { - return &authorizationCredentialsRoundTripper{authType, authCredentials, rt} +type SecretReader interface { + Fetch(ctx context.Context) (string, error) + Description() string + Immutable() bool } -func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) == 0 { - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, string(rt.authCredentials))) +type InlineSecret struct { + text string +} + +func NewInlineSecret(text string) *InlineSecret { + return &InlineSecret{text: text} +} + +func (s *InlineSecret) Fetch(context.Context) (string, error) { + return s.text, nil +} + +func (s *InlineSecret) Description() string { + return "inline" +} + +func (s *InlineSecret) Immutable() bool { + return true +} + +type FileSecret struct { + file string +} + +func NewFileSecret(file string) *FileSecret { + return &FileSecret{file: file} +} + +func (s *FileSecret) Fetch(ctx context.Context) (string, error) { + fileBytes, err := os.ReadFile(s.file) + if err != nil { + return "", fmt.Errorf("unable to read file %s: %w", s.file, err) } - return rt.rt.RoundTrip(req) + return strings.TrimSpace(string(fileBytes)), nil } -func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() { - if ci, ok := rt.rt.(closeIdler); ok { - ci.CloseIdleConnections() +func (s *FileSecret) Description() string { + return fmt.Sprintf("file %s", s.file) +} + +func (s *FileSecret) Immutable() bool { + return false +} + +// refSecret fetches a single secret from a SecretManager. +type refSecret struct { + ref string + manager SecretManager // manager is expected to be not nil. +} + +func (s *refSecret) Fetch(ctx context.Context) (string, error) { + return s.manager.Fetch(ctx, s.ref) +} + +func (s *refSecret) Description() string { + return fmt.Sprintf("ref %s", s.ref) +} + +func (s *refSecret) Immutable() bool { + return false +} + +// toSecret returns a SecretReader from one of the given sources, assuming exactly +// one or none of the sources are provided. +func toSecret(secretManager SecretManager, text Secret, file, ref string) (SecretReader, error) { + if text != "" { + return NewInlineSecret(string(text)), nil + } + if file != "" { + return NewFileSecret(file), nil } + if ref != "" { + if secretManager == nil { + return nil, errors.New("cannot use secret ref without manager") + } + return &refSecret{ + ref: ref, + manager: secretManager, + }, nil + } + return nil, nil } -type authorizationCredentialsFileRoundTripper struct { - authType string - authCredentialsFile string - rt http.RoundTripper +type authorizationCredentialsRoundTripper struct { + authType string + authCredentials SecretReader + rt http.RoundTripper } -// NewAuthorizationCredentialsFileRoundTripper adds the authorization -// credentials read from the provided file to a request unless the authorization -// header has already been set. This file is read for every request. 
-func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile string, rt http.RoundTripper) http.RoundTripper { - return &authorizationCredentialsFileRoundTripper{authType, authCredentialsFile, rt} +// NewAuthorizationCredentialsRoundTripper adds the authorization credentials +// read from the provided SecretReader to a request unless the authorization header +// has already been set. +func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials SecretReader, rt http.RoundTripper) http.RoundTripper { + return &authorizationCredentialsRoundTripper{authType, authCredentials, rt} } -func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) == 0 { - b, err := os.ReadFile(rt.authCredentialsFile) +func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + var authCredentials string + if rt.authCredentials != nil { + var err error + authCredentials, err = rt.authCredentials.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read authorization credentials file %s: %w", rt.authCredentialsFile, err) + return nil, fmt.Errorf("unable to read authorization credentials: %w", err) } - authCredentials := strings.TrimSpace(string(b)) - - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials)) } + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials)) + return rt.rt.RoundTrip(req) } -func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() { +func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() { if ci, ok := rt.rt.(closeIdler); ok { ci.CloseIdleConnections() } } type basicAuthRoundTripper struct { - username string - password Secret - usernameFile string - passwordFile string - rt http.RoundTripper + username SecretReader + password SecretReader + rt http.RoundTripper } // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. 
-func NewBasicAuthRoundTripper(username string, password Secret, usernameFile, passwordFile string, rt http.RoundTripper) http.RoundTripper { - return &basicAuthRoundTripper{username, password, usernameFile, passwordFile, rt} +func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} } func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var username string - var password string if len(req.Header.Get("Authorization")) != 0 { return rt.rt.RoundTrip(req) } - if rt.usernameFile != "" { - usernameBytes, err := os.ReadFile(rt.usernameFile) + var username string + var password string + if rt.username != nil { + var err error + username, err = rt.username.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read basic auth username file %s: %w", rt.usernameFile, err) + return nil, fmt.Errorf("unable to read basic auth username: %w", err) } - username = strings.TrimSpace(string(usernameBytes)) - } else { - username = rt.username } - if rt.passwordFile != "" { - passwordBytes, err := os.ReadFile(rt.passwordFile) + if rt.password != nil { + var err error + password, err = rt.password.Fetch(req.Context()) if err != nil { - return nil, fmt.Errorf("unable to read basic auth password file %s: %w", rt.passwordFile, err) + return nil, fmt.Errorf("unable to read basic auth password: %w", err) } - password = strings.TrimSpace(string(passwordBytes)) - } else { - password = string(rt.password) } req = cloneRequest(req) req.SetBasicAuth(username, password) @@ -706,104 +864,118 @@ func (rt *basicAuthRoundTripper) CloseIdleConnections() { } type oauth2RoundTripper struct { - config *OAuth2 - rt http.RoundTripper - next http.RoundTripper - secret string - mtx sync.RWMutex - opts *httpClientOptions - client *http.Client + mtx sync.RWMutex + lastRT *oauth2.Transport + lastSecret string + + // Required for interaction with Oauth2 server. + config *OAuth2 + clientSecret SecretReader + opts *httpClientOptions + client *http.Client } -func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { +func NewOAuth2RoundTripper(clientSecret SecretReader, config *OAuth2, next http.RoundTripper, opts *httpClientOptions) http.RoundTripper { + if clientSecret == nil { + clientSecret = NewInlineSecret("") + } + return &oauth2RoundTripper{ config: config, - next: next, - opts: opts, + // A correct tokenSource will be added later on. 
+ lastRT: &oauth2.Transport{Base: next}, + opts: opts, + clientSecret: clientSecret, } } -func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var ( - secret string - changed bool - ) +func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret string) (client *http.Client, source oauth2.TokenSource, err error) { + tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig, WithSecretManager(rt.opts.secretManager)) + if err != nil { + return nil, nil, err + } + + tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { + return &http.Transport{ + TLSClientConfig: tlsConfig, + Proxy: rt.config.ProxyConfig.Proxy(), + ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), + DisableKeepAlives: !rt.opts.keepAlivesEnabled, + MaxIdleConns: 20, + MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 + IdleConnTimeout: 10 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, nil + } - if rt.config.ClientSecretFile != "" { - data, err := os.ReadFile(rt.config.ClientSecretFile) + var t http.RoundTripper + tlsSettings, err := rt.config.TLSConfig.roundTripperSettings(rt.opts.secretManager) + if err != nil { + return nil, nil, err + } + if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + t, _ = tlsTransport(tlsConfig) + } else { + t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) if err != nil { - return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %w", rt.config.ClientSecretFile, err) + return nil, nil, err } - secret = strings.TrimSpace(string(data)) - rt.mtx.RLock() - changed = secret != rt.secret - rt.mtx.RUnlock() - } else { - // Either an inline secret or nothing (use an empty string) was provided. 
- secret = string(rt.config.ClientSecret) } - if changed || rt.rt == nil { - config := &clientcredentials.Config{ - ClientID: rt.config.ClientID, - ClientSecret: secret, - Scopes: rt.config.Scopes, - TokenURL: rt.config.TokenURL, - EndpointParams: mapToValues(rt.config.EndpointParams), - } + if ua := req.UserAgent(); ua != "" { + t = NewUserAgentRoundTripper(ua, t) + } - tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig) - if err != nil { - return nil, err - } + config := &clientcredentials.Config{ + ClientID: rt.config.ClientID, + ClientSecret: secret, + Scopes: rt.config.Scopes, + TokenURL: rt.config.TokenURL, + EndpointParams: mapToValues(rt.config.EndpointParams), + } + client = &http.Client{Transport: t} + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client) + return client, config.TokenSource(ctx), nil +} - tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) { - return &http.Transport{ - TLSClientConfig: tlsConfig, - Proxy: rt.config.ProxyConfig.Proxy(), - ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(), - DisableKeepAlives: !rt.opts.keepAlivesEnabled, - MaxIdleConns: 20, - MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801 - IdleConnTimeout: 10 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, nil - } +func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var ( + secret string + needsInit bool + ) - var t http.RoundTripper - if len(rt.config.TLSConfig.CAFile) == 0 { - t, _ = tlsTransport(tlsConfig) - } else { - t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.roundTripperSettings(), tlsTransport) + rt.mtx.RLock() + secret = rt.lastSecret + needsInit = rt.lastRT.Source == nil + rt.mtx.RUnlock() + + // Fetch the secret if it's our first run or always if the secret can change. + if !rt.clientSecret.Immutable() || needsInit { + newSecret, err := rt.clientSecret.Fetch(req.Context()) + if err != nil { + return nil, fmt.Errorf("unable to read oauth2 client secret: %w", err) + } + if newSecret != secret || needsInit { + // Secret changed or it's a first run. Rebuilt oauth2 setup. + client, source, err := rt.newOauth2TokenSource(req, newSecret) if err != nil { return nil, err } - } - - if ua := req.UserAgent(); ua != "" { - t = NewUserAgentRoundTripper(ua, t) - } - client := &http.Client{Transport: t} - ctx := context.WithValue(context.Background(), oauth2.HTTPClient, client) - tokenSource := config.TokenSource(ctx) - - rt.mtx.Lock() - rt.secret = secret - rt.rt = &oauth2.Transport{ - Base: rt.next, - Source: tokenSource, - } - if rt.client != nil { - rt.client.CloseIdleConnections() + rt.mtx.Lock() + rt.lastSecret = secret + rt.lastRT.Source = source + if rt.client != nil { + rt.client.CloseIdleConnections() + } + rt.client = client + rt.mtx.Unlock() } - rt.client = client - rt.mtx.Unlock() } rt.mtx.RLock() - currentRT := rt.rt + currentRT := rt.lastRT rt.mtx.RUnlock() return currentRT.RoundTrip(req) } @@ -812,7 +984,7 @@ func (rt *oauth2RoundTripper) CloseIdleConnections() { if rt.client != nil { rt.client.CloseIdleConnections() } - if ci, ok := rt.next.(closeIdler); ok { + if ci, ok := rt.lastRT.Base.(closeIdler); ok { ci.CloseIdleConnections() } } @@ -840,8 +1012,27 @@ func cloneRequest(r *http.Request) *http.Request { return r2 } +type tlsConfigOptions struct { + secretManager SecretManager +} + +// TLSConfigOption defines an option that can be applied to the HTTP client. 
+type TLSConfigOption interface { + applyToTLSConfigOptions(options *tlsConfigOptions) +} + // NewTLSConfig creates a new tls.Config from the given TLSConfig. -func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { +func NewTLSConfig(cfg *TLSConfig, optFuncs ...TLSConfigOption) (*tls.Config, error) { + return NewTLSConfigWithContext(context.Background(), cfg, optFuncs...) +} + +// NewTLSConfigWithContext creates a new tls.Config from the given TLSConfig. +func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TLSConfigOption) (*tls.Config, error) { + opts := tlsConfigOptions{} + for _, opt := range optFuncs { + opt.applyToTLSConfigOptions(&opts) + } + if err := cfg.Validate(); err != nil { return nil, err } @@ -860,17 +1051,17 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. - if len(cfg.CA) > 0 { - if !updateRootCA(tlsConfig, []byte(cfg.CA)) { - return nil, fmt.Errorf("unable to use inline CA cert") - } - } else if len(cfg.CAFile) > 0 { - b, err := readCAFile(cfg.CAFile) + caSecret, err := toSecret(opts.secretManager, Secret(cfg.CA), cfg.CAFile, cfg.CARef) + if err != nil { + return nil, fmt.Errorf("unable to use CA cert: %w", err) + } + if caSecret != nil { + ca, err := caSecret.Fetch(ctx) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to read CA cert: %w", err) } - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", cfg.CAFile) + if !updateRootCA(tlsConfig, []byte(ca)) { + return nil, fmt.Errorf("unable to use specified CA cert %s", caSecret.Description()) } } @@ -881,10 +1072,16 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a client cert & key is provided then configure TLS config accordingly. if cfg.usingClientCert() && cfg.usingClientKey() { // Verify that client cert and key are valid. - if _, err := cfg.getClientCertificate(nil); err != nil { + if _, err := cfg.getClientCertificate(ctx, opts.secretManager); err != nil { return nil, err } - tlsConfig.GetClientCertificate = cfg.getClientCertificate + tlsConfig.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + var ctx context.Context + if cri != nil { + ctx = cri.Context() + } + return cfg.getClientCertificate(ctx, opts.secretManager) + } } return tlsConfig, nil @@ -904,6 +1101,15 @@ type TLSConfig struct { CertFile string `yaml:"cert_file,omitempty" json:"cert_file,omitempty"` // The client key file for the targets. KeyFile string `yaml:"key_file,omitempty" json:"key_file,omitempty"` + // CARef is the name of the secret within the secret manager to use as the CA cert for the + // targets. + CARef string `yaml:"ca_ref,omitempty" json:"ca_ref,omitempty"` + // CertRef is the name of the secret within the secret manager to use as the client cert for + // the targets. + CertRef string `yaml:"cert_ref,omitempty" json:"cert_ref,omitempty"` + // KeyRef is the name of the secret within the secret manager to use as the client key for + // the targets. + KeyRef string `yaml:"key_ref,omitempty" json:"key_ref,omitempty"` // Used to verify the hostname for the targets. ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` // Disable target certificate validation. @@ -937,13 +1143,13 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // file-based fields for the TLS CA, client certificate, and client key are // used. 
func (c *TLSConfig) Validate() error { - if len(c.CA) > 0 && len(c.CAFile) > 0 { - return fmt.Errorf("at most one of ca and ca_file must be configured") + if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 { + return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured") } - if len(c.Cert) > 0 && len(c.CertFile) > 0 { - return fmt.Errorf("at most one of cert and cert_file must be configured") + if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 { + return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured") } - if len(c.Key) > 0 && len(c.KeyFile) > 0 { + if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 { return fmt.Errorf("at most one of key and key_file must be configured") } @@ -957,66 +1163,70 @@ func (c *TLSConfig) Validate() error { } func (c *TLSConfig) usingClientCert() bool { - return len(c.Cert) > 0 || len(c.CertFile) > 0 + return len(c.Cert) > 0 || len(c.CertFile) > 0 || len(c.CertRef) > 0 } func (c *TLSConfig) usingClientKey() bool { - return len(c.Key) > 0 || len(c.KeyFile) > 0 + return len(c.Key) > 0 || len(c.KeyFile) > 0 || len(c.KeyRef) > 0 } -func (c *TLSConfig) roundTripperSettings() TLSRoundTripperSettings { - return TLSRoundTripperSettings{ - CA: c.CA, - CAFile: c.CAFile, - Cert: c.Cert, - CertFile: c.CertFile, - Key: string(c.Key), - KeyFile: c.KeyFile, +func (c *TLSConfig) roundTripperSettings(secretManager SecretManager) (TLSRoundTripperSettings, error) { + ca, err := toSecret(secretManager, Secret(c.CA), c.CAFile, c.CARef) + if err != nil { + return TLSRoundTripperSettings{}, err + } + cert, err := toSecret(secretManager, Secret(c.Cert), c.CertFile, c.CertRef) + if err != nil { + return TLSRoundTripperSettings{}, err + } + key, err := toSecret(secretManager, c.Key, c.KeyFile, c.KeyRef) + if err != nil { + return TLSRoundTripperSettings{}, err } + return TLSRoundTripperSettings{ + CA: ca, + Cert: cert, + Key: key, + }, nil } -// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { +// getClientCertificate reads the pair of client cert and key and returns a tls.Certificate. 
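Validate now enforces an at-most-one-of rule across the inline, file, and secret-ref fields through nonZeroCount, which is defined outside this hunk. A helper compatible with the call shape above might look like the following; this is an assumption, and the vendored helper may well be generic over comparable types rather than bool-only:

```go
package main

import "fmt"

// nonZeroCount returns how many of the given conditions hold, so the
// caller can reject configs where more than one source is set.
func nonZeroCount(values ...bool) int {
	n := 0
	for _, v := range values {
		if v {
			n++
		}
	}
	return n
}

func main() {
	ca, caFile, caRef := "inline-pem", "", "my-secret"
	if nonZeroCount(len(ca) > 0, len(caFile) > 0, len(caRef) > 0) > 1 {
		fmt.Println("at most one of ca, ca_file & ca_ref must be configured")
	}
}
```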
+func (c *TLSConfig) getClientCertificate(ctx context.Context, secretManager SecretManager) (*tls.Certificate, error) { var ( - certData, keyData []byte + certData, keyData string err error ) - if c.CertFile != "" { - certData, err = os.ReadFile(c.CertFile) + certSecret, err := toSecret(secretManager, Secret(c.Cert), c.CertFile, c.CertRef) + if err != nil { + return nil, fmt.Errorf("unable to use client cert: %w", err) + } + if certSecret != nil { + certData, err = certSecret.Fetch(ctx) if err != nil { - return nil, fmt.Errorf("unable to read specified client cert (%s): %w", c.CertFile, err) + return nil, fmt.Errorf("unable to read specified client cert: %w", err) } - } else { - certData = []byte(c.Cert) } - if c.KeyFile != "" { - keyData, err = os.ReadFile(c.KeyFile) + keySecret, err := toSecret(secretManager, Secret(c.Key), c.KeyFile, c.KeyRef) + if err != nil { + return nil, fmt.Errorf("unable to use client key: %w", err) + } + if keySecret != nil { + keyData, err = keySecret.Fetch(ctx) if err != nil { - return nil, fmt.Errorf("unable to read specified client key (%s): %w", c.KeyFile, err) + return nil, fmt.Errorf("unable to read specified client key: %w", err) } - } else { - keyData = []byte(c.Key) } - cert, err := tls.X509KeyPair(certData, keyData) + cert, err := tls.X509KeyPair([]byte(certData), []byte(keyData)) if err != nil { - return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", c.CertFile, c.KeyFile, err) + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", certSecret.Description(), keySecret.Description(), err) } return &cert, nil } -// readCAFile reads the CA cert file from disk. -func readCAFile(f string) ([]byte, error) { - data, err := os.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to load specified CA cert %s: %w", f, err) - } - return data, nil -} - // updateRootCA parses the given byte slice as a series of PEM encoded certificates and updates tls.Config.RootCAs. 
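However the cert and key strings are sourced (inline config, file, or secret manager), the rewritten getClientCertificate ends in the same place: tls.X509KeyPair over two PEM blobs. A self-contained illustration; the throwaway self-signed certificate exists only so the snippet runs without external files:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Generate a throwaway key pair and self-signed cert; real callers
	// would fetch PEM bytes via a SecretReader instead.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example"},
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, _ := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	keyDER, _ := x509.MarshalECPrivateKey(key)

	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})

	// The call the rewritten method reduces to: PEM in, tls.Certificate out.
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		panic(err)
	}
	fmt.Println("leaf certificates:", len(cert.Certificate))
}
```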
func updateRootCA(cfg *tls.Config, b []byte) bool { caCertPool := x509.NewCertPool() @@ -1044,15 +1254,24 @@ type tlsRoundTripper struct { } type TLSRoundTripperSettings struct { - CA, CAFile string - Cert, CertFile string - Key, KeyFile string + CA SecretReader + Cert SecretReader + Key SecretReader } func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, newRT func(*tls.Config) (http.RoundTripper, error), +) (http.RoundTripper, error) { + return NewTLSRoundTripperWithContext(context.Background(), cfg, settings, newRT) +} + +func NewTLSRoundTripperWithContext( + ctx context.Context, + cfg *tls.Config, + settings TLSRoundTripperSettings, + newRT func(*tls.Config) (http.RoundTripper, error), ) (http.RoundTripper, error) { t := &tlsRoundTripper{ settings: settings, @@ -1065,7 +1284,7 @@ func NewTLSRoundTripper( return nil, err } t.rt = rt - _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash() + _, t.hashCAData, t.hashCertData, t.hashKeyData, err = t.getTLSDataWithHash(ctx) if err != nil { return nil, err } @@ -1073,38 +1292,31 @@ func NewTLSRoundTripper( return t, nil } -func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, error) { - var ( - caBytes, certBytes, keyBytes []byte - - err error - ) +func (t *tlsRoundTripper) getTLSDataWithHash(ctx context.Context) ([]byte, []byte, []byte, []byte, error) { + var caBytes, certBytes, keyBytes []byte - if t.settings.CAFile != "" { - caBytes, err = os.ReadFile(t.settings.CAFile) + if t.settings.CA != nil { + ca, err := t.settings.CA.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read CA cert: %w", err) } - } else if t.settings.CA != "" { - caBytes = []byte(t.settings.CA) + caBytes = []byte(ca) } - if t.settings.CertFile != "" { - certBytes, err = os.ReadFile(t.settings.CertFile) + if t.settings.Cert != nil { + cert, err := t.settings.Cert.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read client cert: %w", err) } - } else if t.settings.Cert != "" { - certBytes = []byte(t.settings.Cert) + certBytes = []byte(cert) } - if t.settings.KeyFile != "" { - keyBytes, err = os.ReadFile(t.settings.KeyFile) + if t.settings.Key != nil { + key, err := t.settings.Key.Fetch(ctx) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, fmt.Errorf("unable to read client key: %w", err) } - } else if t.settings.Key != "" { - keyBytes = []byte(t.settings.Key) + keyBytes = []byte(key) } var caHash, certHash, keyHash [32]byte @@ -1124,7 +1336,7 @@ func (t *tlsRoundTripper) getTLSDataWithHash() ([]byte, []byte, []byte, []byte, // RoundTrip implements the http.RoundTrip interface. func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash() + caData, caHash, certHash, keyHash, err := t.getTLSDataWithHash(req.Context()) if err != nil { return nil, err } @@ -1145,7 +1357,7 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { // using GetClientCertificate. tlsConfig := t.tlsConfig.Clone() if !updateRootCA(tlsConfig, caData) { - return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CAFile) + return nil, fmt.Errorf("unable to use specified CA cert %s", t.settings.CA.Description()) } rt, err = t.newRT(tlsConfig) if err != nil { @@ -1236,7 +1448,7 @@ type ProxyConfig struct { // proxies during CONNECT requests. 
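getTLSDataWithHash returns the fetched material together with 32-byte hashes (consistent with SHA-256, though the hashing lines are elided by the hunk) so RoundTrip can cheaply detect certificate rotation. The comparison reduces to:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Sketch of the change-detection idea, assuming SHA-256: hash whatever
// material was just fetched and compare with the hash captured when the
// transport was built; any difference forces a rebuild.
func main() {
	old := sha256.Sum256([]byte("---old ca pem---"))
	cur := sha256.Sum256([]byte("---rotated ca pem---"))
	if cur != old {
		fmt.Println("CA material changed; rebuild the inner RoundTripper")
	}
}
```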
Assume that at least _some_ of // these headers are going to contain secrets and use Secret as the // value type instead of string. - ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` + ProxyConnectHeader ProxyHeader `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"` proxyFunc func(*http.Request) (*url.URL, error) } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f6cbe7d298..ff5ef7a9d92 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -21,9 +21,10 @@ import ( "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "github.com/prometheus/common/model" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" ) diff --git a/vendor/github.com/prometheus/common/helpers/templates/time.go b/vendor/github.com/prometheus/common/helpers/templates/time.go new file mode 100644 index 00000000000..266c8c992fd --- /dev/null +++ b/vendor/github.com/prometheus/common/helpers/templates/time.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package templates + +import ( + "fmt" + "math" + "strconv" + "time" +) + +func convertToFloat(i interface{}) (float64, error) { + switch v := i.(type) { + case float64: + return v, nil + case string: + return strconv.ParseFloat(v, 64) + case int: + return float64(v), nil + case uint: + return float64(v), nil + case int64: + return float64(v), nil + case uint64: + return float64(v), nil + case time.Duration: + return v.Seconds(), nil + default: + return 0, fmt.Errorf("can't convert %T to float", v) + } +} + +func HumanizeDuration(i interface{}) (string, error) { + v, err := convertToFloat(i) + if err != nil { + return "", err + } + + if math.IsNaN(v) || math.IsInf(v, 0) { + return fmt.Sprintf("%.4g", v), nil + } + if v == 0 { + return fmt.Sprintf("%.4gs", v), nil + } + if math.Abs(v) >= 1 { + sign := "" + if v < 0 { + sign = "-" + v = -v + } + duration := int64(v) + seconds := duration % 60 + minutes := (duration / 60) % 60 + hours := (duration / 60 / 60) % 24 + days := duration / 60 / 60 / 24 + // For days to minutes, we display seconds as an integer. + if days != 0 { + return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds), nil + } + if hours != 0 { + return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds), nil + } + if minutes != 0 { + return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds), nil + } + // For seconds, we display 4 significant digits. 
+ return fmt.Sprintf("%s%.4gs", sign, v), nil + } + prefix := "" + for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { + if math.Abs(v) >= 1 { + break + } + prefix = p + v *= 1000 + } + return fmt.Sprintf("%.4g%ss", v, prefix), nil +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58d..00000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index a21b9d15dd8..00000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. 
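The new helpers/templates package exports HumanizeDuration for reuse outside the template engine (the template.go hunk later in this diff switches to it). A quick usage check against the branches above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/helpers/templates"
)

func main() {
	for _, v := range []interface{}{3660.5, 0.0042, "86400"} {
		s, err := templates.HumanizeDuration(v)
		if err != nil {
			panic(err)
		}
		fmt.Println(s) // "1h 1m 0s", "4.2ms", "1d 0h 0m 0s"
	}
}
```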
- -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept 
header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index d02081d2e16..9defa10d485 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -154,7 +154,7 @@ var ( DefaultRuntimeConfig = RuntimeConfig{ // Go runtime tuning. - GoGC: 50, + GoGC: 75, } // DefaultScrapeConfig is the default scrape configuration. diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index f14071af309..897d7d151cf 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -120,6 +120,16 @@ func Name(n string) func(*Manager) { } } +// Updatert sets the updatert of the manager. +// Used to speed up tests. +func Updatert(u time.Duration) func(*Manager) { + return func(m *Manager) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.updatert = u + } +} + // HTTPClientOptions sets the list of HTTP client options to expose to // Discoverers. It is up to Discoverers to choose to use the options provided. func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go index f46321c97e7..4bc94f84fe5 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -18,6 +18,7 @@ import ( "encoding/json" "slices" "strconv" + "unsafe" "github.com/prometheus/common/model" ) @@ -215,3 +216,7 @@ func contains(s []Label, n string) bool { } return false } + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go index dfc74aa3a3d..972f5dc164e 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go @@ -20,7 +20,6 @@ import ( "slices" "strings" "sync" - "unsafe" "github.com/cespare/xxhash/v2" ) @@ -426,10 +425,6 @@ func EmptyLabels() Labels { return Labels{} } -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - // New returns a sorted Labels from the given labels. // The caller has to guarantee that all label names are unique. // Note this function is not efficient; should not be used in performance-critical places. 
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index 9ef764daecb..bccceb61fe1 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -299,11 +299,6 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } - -func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) -} - func yoloBytes(s string) (b []byte) { *(*string)(unsafe.Pointer(&b)) = s (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) diff --git a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go index 8e220e392d8..a09c838e3f8 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go @@ -101,7 +101,7 @@ func (m *Matcher) shouldQuoteName() bool { } return true } - return false + return len(m.Name) == 0 } // Matches returns whether the matcher matches the given string value. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index f228d7ff1f3..1e9db882bf8 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -16,10 +16,12 @@ package labels import ( "slices" "strings" + "unicode" "unicode/utf8" "github.com/grafana/regexp" "github.com/grafana/regexp/syntax" + "golang.org/x/text/unicode/norm" ) const ( @@ -766,7 +768,7 @@ type equalMultiStringMapMatcher struct { func (m *equalMultiStringMapMatcher) add(s string) { if !m.caseSensitive { - s = strings.ToLower(s) + s = toNormalisedLower(s) } m.values[s] = struct{}{} @@ -786,13 +788,35 @@ func (m *equalMultiStringMapMatcher) setMatches() []string { func (m *equalMultiStringMapMatcher) Matches(s string) bool { if !m.caseSensitive { - s = strings.ToLower(s) + s = toNormalisedLower(s) } _, ok := m.values[s] return ok } +// toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert +// it to lower case. +func toNormalisedLower(s string) string { + var buf []byte + for i := 0; i < len(s); i++ { + c := s[i] + if c >= utf8.RuneSelf { + return strings.Map(unicode.ToLower, norm.NFKD.String(s)) + } + if 'A' <= c && c <= 'Z' { + if buf == nil { + buf = []byte(s) + } + buf[i] = c + 'a' - 'A' + } + } + if buf == nil { + return s + } + return yoloString(buf) +} + // anyStringWithoutNewlineMatcher is a stringMatcher which matches any string // (including an empty one) as far as it doesn't contain any newline character. type anyStringWithoutNewlineMatcher struct{} diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 4cf376aa05f..cd00a4507d2 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -298,25 +298,14 @@ func (n *Manager) nextBatch() []*Alert { return alerts } -// Run dispatches notifications continuously. -func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { +// sendLoop continuously consumes the notifications queue and sends alerts to +// the configured Alertmanagers. 
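toNormalisedLower above takes a fast byte-wise path for ASCII and falls back to NFKD decomposition plus ToLower as soon as it sees a non-ASCII byte, so compatibility-equivalent forms match case-insensitively. The fallback behaves like:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"

	"golang.org/x/text/unicode/norm"
)

// Same slow path as toNormalisedLower: NFKD-decompose, then lower-case,
// so visually equivalent compatibility forms compare equal.
func slowNormalisedLower(s string) string {
	return strings.Map(unicode.ToLower, norm.NFKD.String(s))
}

func main() {
	fmt.Println(slowNormalisedLower("ＡＢＣ")) // "abc" (fullwidth letters fold to ASCII)
	fmt.Println(slowNormalisedLower("ABC") == slowNormalisedLower("ａｂｃ")) // true
}
```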
+func (n *Manager) sendLoop() { for { - // The select is split in two parts, such as we will first try to read - // new alertmanager targets if they are available, before sending new - // alerts. select { case <-n.ctx.Done(): return - case ts := <-tsets: - n.reload(ts) - default: - select { - case <-n.ctx.Done(): - return - case ts := <-tsets: - n.reload(ts) - case <-n.more: - } + case <-n.more: } alerts := n.nextBatch() @@ -330,6 +319,21 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { } } +// Run receives updates of target groups and triggers a reload. +// The dispatching of notifications occurs in the background to prevent blocking the receipt of target updates. +// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details. +func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { + go n.sendLoop() + for { + select { + case <-n.ctx.Done(): + return + case ts := <-tsets: + n.reload(ts) + } + } +} + func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { n.mtx.Lock() defer n.mtx.Unlock() @@ -471,10 +475,6 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { numSuccess atomic.Uint64 ) for _, ams := range amSets { - if len(ams.ams) == 0 { - continue - } - var ( payload []byte err error @@ -483,6 +483,11 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { ams.mtx.RLock() + if len(ams.ams) == 0 { + ams.mtx.RUnlock() + continue + } + if len(ams.cfg.AlertRelabelConfigs) > 0 { amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts) if len(amAlerts) == 0 { @@ -611,6 +616,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. + //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 83e44e61f9d..2a84871f00d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -2740,7 +2740,7 @@ type groupedAggregation struct { hasHistogram bool // Has at least 1 histogram sample aggregated. floatValue float64 histogramValue *histogram.FloatHistogram - floatMean float64 + floatMean float64 // Mean, or "compensating value" for Kahan summation. groupCount int heap vectorByValueHeap } @@ -2768,11 +2768,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix *group = groupedAggregation{ seen: true, floatValue: f, - floatMean: f, groupCount: 1, } switch op { - case parser.SUM, parser.AVG: + case parser.AVG: + group.floatMean = f + fallthrough + case parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2780,6 +2782,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: + group.floatMean = f group.floatValue = 0 case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) @@ -2802,7 +2805,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. 
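The Run/sendLoop split decouples target-group reloads from alert dispatch, so a slow or blocked Alertmanager can no longer stall reloads (see the linked issue). The shape of the pattern, with illustrative names rather than the notifier's API:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	updates := make(chan string)   // target-group updates
	more := make(chan struct{}, 1) // "queue has alerts" signal

	go func() { // sendLoop analogue: drains the queue in the background
		for {
			select {
			case <-ctx.Done():
				return
			case <-more:
				fmt.Println("sending queued alerts")
			}
		}
	}()

	go func() {
		updates <- "targets-v2"
		more <- struct{}{}
	}()

	for { // Run analogue: now only applies reloads, never blocks on sends
		select {
		case <-ctx.Done():
			return
		case ts := <-updates:
			fmt.Println("reloaded", ts)
		}
	}
}
```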
} else { group.hasFloat = true - group.floatValue += f + group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) } case parser.AVG: @@ -2913,6 +2916,8 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix } if aggr.hasHistogram { aggr.histogramValue.Compact(0) + } else { + aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. } default: // For other aggregations, we already have the right value. diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 140194ec71c..e8791b643ad 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -231,6 +231,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { httpResp.Body.Close() }() + //nolint:usestdlibvars if httpResp.StatusCode/100 != 2 { scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) line := "" @@ -239,6 +240,7 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { } err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) } + //nolint:usestdlibvars if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} @@ -323,6 +325,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err) } + //nolint:usestdlibvars if httpResp.StatusCode/100 != 2 { return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed))) } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 1228b23f5c5..8c569ff0388 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -95,7 +95,7 @@ func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error // ToQuery builds a Query proto. 
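sum() now accumulates through kahanSumInc, whose definition sits outside this hunk; the standard Neumaier variant, consistent with the call sites and the final `floatValue += floatMean` compensation step above, looks like this:

```go
package main

import (
	"fmt"
	"math"
)

// Modeled on the kahanSumInc helper the sum() hunk calls: sum carries
// the running total, c the compensation term that is added back once at
// the end (Neumaier's variant of Kahan summation).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	if math.Abs(sum) >= math.Abs(inc) {
		c += (sum - t) + inc // recover low-order bits lost from inc
	} else {
		c += (inc - t) + sum // recover low-order bits lost from sum
	}
	return t, c
}

func main() {
	var naive, sum, c float64
	for i := 0; i < 10; i++ {
		naive += 0.1
		sum, c = kahanSumInc(0.1, sum, c)
	}
	// naive drifts below 1; the compensated result is (much) closer.
	fmt.Printf("naive=%.17g compensated=%.17g\n", naive, sum+c)
}
```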
func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) { - ms, err := toLabelMatchers(matchers) + ms, err := ToLabelMatchers(matchers) if err != nil { return nil, err } @@ -166,7 +166,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, } resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ - Labels: labelsToLabelsProto(series.Labels(), nil), + Labels: LabelsToLabelsProto(series.Labels(), nil), Samples: samples, Histograms: histograms, }) @@ -182,7 +182,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet if err := validateLabelsAndMetricName(ts.Labels); err != nil { return errSeriesSet{err: err} } - lbls := labelProtosToLabels(&b, ts.Labels) + lbls := LabelProtosToLabels(&b, ts.Labels) series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms}) } @@ -235,7 +235,7 @@ func StreamChunkedReadResponses( for ss.Next() { series := ss.At() iter = series.Iterator(iter) - lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) + lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) maxDataLength := maxBytesInFrame for _, lbl := range lbls { @@ -566,7 +566,8 @@ func validateLabelsAndMetricName(ls []prompb.Label) error { return nil } -func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { +// ToLabelMatchers converts Prometheus label matchers to protobuf label matchers. +func ToLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) for _, m := range matchers { var mType prompb.LabelMatcher_Type @@ -591,7 +592,7 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) return pbMatchers, nil } -// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers. +// FromLabelMatchers converts protobuf label matchers to Prometheus label matchers. func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { result := make([]*labels.Matcher, 0, len(matchers)) for _, matcher := range matchers { @@ -621,7 +622,7 @@ func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemp timestamp := ep.Timestamp return exemplar.Exemplar{ - Labels: labelProtosToLabels(b, ep.Labels), + Labels: LabelProtosToLabels(b, ep.Labels), Value: ep.Value, Ts: timestamp, HasTs: timestamp != 0, @@ -761,7 +762,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { return metric } -func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels { +// LabelProtosToLabels transforms prompb labels into labels. The labels builder +// will be used to build the returned labels. +func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels { b.Reset() for _, l := range labelPairs { b.Add(l.Name, l.Value) @@ -770,9 +773,9 @@ func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) la return b.Labels() } -// labelsToLabelsProto transforms labels into prompb labels. The buffer slice +// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice // will be used to avoid allocations if it is big enough to store the labels. 
-func labelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label { +func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label { result := buf[:0] lbls.Range(func(l labels.Label) { result = append(result, prompb.Label{ diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 01d2db06a5c..dde78d35e58 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -16,6 +16,7 @@ package remote import ( "context" "errors" + "fmt" "math" "strconv" "sync" @@ -1224,12 +1225,16 @@ func (s *shards) stop() { // Force an unclean shutdown. s.hardShutdown() <-s.done - if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 { - level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped) - } - if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 { - level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped) + + // Log error for any dropped samples, exemplars, or histograms. + logDroppedError := func(t string, counter atomic.Uint32) { + if dropped := counter.Load(); dropped > 0 { + level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) + } } + logDroppedError("samples", s.samplesDroppedOnHardShutdown) + logDroppedError("exemplars", s.exemplarsDroppedOnHardShutdown) + logDroppedError("histograms", s.histogramsDroppedOnHardShutdown) } // enqueue data (sample or exemplar). If the shard is full, shutting down, or @@ -1507,7 +1512,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // stop reading from the queue. This makes it safe to reference pendingSamples by index. 
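With the codec conversions exported above, downstream users (Cortex included) can convert between labels and prompb without copying private helpers. A round-trip usage sketch, assuming the vendored version in this diff:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	lbls := labels.FromStrings("__name__", "up", "job", "node")

	// labels -> prompb; a non-nil buf would be reused to avoid allocations.
	pb := remote.LabelsToLabelsProto(lbls, nil)

	// prompb -> labels, built through a ScratchBuilder.
	var b labels.ScratchBuilder
	back := remote.LabelProtosToLabels(&b, pb)
	fmt.Println(back.String())
}
```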
- pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) switch d.sType { case tSample: pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{ @@ -1517,7 +1522,7 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim nPendingSamples++ case tExemplar: pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{ - Labels: labelsToLabelsProto(d.exemplarLabels, nil), + Labels: LabelsToLabelsProto(d.exemplarLabels, nil), Value: d.value, Timestamp: d.timestamp, }) @@ -1537,7 +1542,7 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s begin := time.Now() err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf) if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) + level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount)) @@ -1778,9 +1783,11 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest { lowest = ts.Histograms[0].Timestamp } - - // Move the current element to the write position and increment the write pointer - timeSeries[keepIdx] = timeSeries[i] + if i != keepIdx { + // We have to swap the kept timeseries with the one which should be dropped. + // Copying any elements within timeSeries could cause data corruptions when reusing the slice in a next batch (shards.populateTimeSeries). + timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx] + } keepIdx++ } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index ff227292b8a..0832c65abe1 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net/http" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -25,7 +26,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage" otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" @@ -38,6 +41,8 @@ type writeHandler struct { samplesWithInvalidLabelsTotal prometheus.Counter } +const maxAheadTime = 10 * time.Minute + // NewWriteHandler creates a http.Handler that accepts remote write requests and // writes them to the provided appendable. 
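The buildTimeSeries change swaps elements instead of copying them forward, so that when the slice is reused for the next batch no two positions alias the same backing arrays. The same compaction pattern in isolation:

```go
package main

import "fmt"

type series struct {
	samples []int // reused between batches, like prompb.TimeSeries fields
}

// compactKeep moves kept elements to the front by swapping, so dropped
// elements keep sole ownership of their backing slices and the whole
// slice can be safely reused for the next batch.
func compactKeep(ts []series, keep func(series) bool) []series {
	keepIdx := 0
	for i := range ts {
		if !keep(ts[i]) {
			continue
		}
		if i != keepIdx {
			ts[keepIdx], ts[i] = ts[i], ts[keepIdx]
		}
		keepIdx++
	}
	return ts[:keepIdx]
}

func main() {
	batch := []series{{samples: []int{1}}, {samples: []int{2}}, {samples: []int{3}}}
	kept := compactKeep(batch, func(s series) bool { return s.samples[0] != 1 })
	fmt.Println(len(kept), kept[0].samples, kept[1].samples) // 2 [2] [3]
}
```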
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler { @@ -104,19 +109,24 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err outOfOrderExemplarErrs := 0 samplesWithInvalidLabels := 0 - app := h.appendable.Appender(ctx) + timeLimitApp := &timeLimitAppender{ + Appender: h.appendable.Appender(ctx), + maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), + } + defer func() { if err != nil { - _ = app.Rollback() + _ = timeLimitApp.Rollback() return } - err = app.Commit() + err = timeLimitApp.Commit() }() b := labels.NewScratchBuilder(0) var exemplarErr error + for _, ts := range req.Timeseries { - labels := labelProtosToLabels(&b, ts.Labels) + labels := LabelProtosToLabels(&b, ts.Labels) if !labels.IsValid() { level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String()) samplesWithInvalidLabels++ @@ -124,7 +134,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } var ref storage.SeriesRef for _, s := range ts.Samples { - ref, err = app.Append(ref, labels, s.Timestamp, s.Value) + ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value) if err != nil { unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { @@ -140,7 +150,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, ep := range ts.Exemplars { e := exemplarProtoToExemplar(&b, ep) - _, exemplarErr = app.AppendExemplar(0, labels, e) + _, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e) exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs) if exemplarErr != nil { // Since exemplar storage is still experimental, we don't fail the request on ingestion errors. @@ -151,11 +161,12 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err for _, hp := range ts.Histograms { if hp.IsFloatHistogram() { fhs := FloatHistogramProtoToFloatHistogram(hp) - _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) + _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs) } else { hs := HistogramProtoToHistogram(hp) - _, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil) + _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil) } + if err != nil { unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { @@ -233,3 +244,45 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) } + +type timeLimitAppender struct { + storage.Appender + + maxTime int64 +} + +func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if t > app.maxTime { + return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) + } + + ref, err := app.Appender.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, nil +} + +func (app *timeLimitAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if t > app.maxTime { + return 0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) + } + + ref, err := app.Appender.AppendHistogram(ref, l, t, h, fh) + if err != nil { + return 0, err + } + return ref, nil +} + +func (app *timeLimitAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + if e.Ts > app.maxTime { + return 
0, fmt.Errorf("%w: timestamp is too far in the future", storage.ErrOutOfBounds) + } + + ref, err := app.Appender.AppendExemplar(ref, l, e) + if err != nil { + return 0, err + } + return ref, nil +} diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 43772805cd1..dbe1607cfa7 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -32,6 +32,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + common_templates "github.com/prometheus/common/helpers/templates" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/strutil" ) @@ -263,51 +265,7 @@ func NewTemplateExpander( } return fmt.Sprintf("%.4g%s", v, prefix), nil }, - "humanizeDuration": func(i interface{}) (string, error) { - v, err := convertToFloat(i) - if err != nil { - return "", err - } - if math.IsNaN(v) || math.IsInf(v, 0) { - return fmt.Sprintf("%.4g", v), nil - } - if v == 0 { - return fmt.Sprintf("%.4gs", v), nil - } - if math.Abs(v) >= 1 { - sign := "" - if v < 0 { - sign = "-" - v = -v - } - duration := int64(v) - seconds := duration % 60 - minutes := (duration / 60) % 60 - hours := (duration / 60 / 60) % 24 - days := duration / 60 / 60 / 24 - // For days to minutes, we display seconds as an integer. - if days != 0 { - return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds), nil - } - if hours != 0 { - return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds), nil - } - if minutes != 0 { - return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds), nil - } - // For seconds, we display 4 significant digits. - return fmt.Sprintf("%s%.4gs", sign, v), nil - } - prefix := "" - for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} { - if math.Abs(v) >= 1 { - break - } - prefix = p - v *= 1000 - } - return fmt.Sprintf("%.4g%ss", v, prefix), nil - }, + "humanizeDuration": common_templates.HumanizeDuration, "humanizePercentage": func(i interface{}) (string, error) { v, err := convertToFloat(i) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 83b86a58d16..2f32733f8c4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -103,9 +103,9 @@ type IndexReader interface { // storage.ErrNotFound is returned as error. LabelValueFor(ctx context.Context, id storage.SeriesRef, label string) (string, error) - // LabelNamesFor returns all the label names for the series referred to by IDs. + // LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. - LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) + LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) // Close releases the underlying resources of the reader. Close() error @@ -551,10 +551,10 @@ func (r blockIndexReader) LabelValueFor(ctx context.Context, id storage.SeriesRe return r.ir.LabelValueFor(ctx, id, label) } -// LabelNamesFor returns all the label names for the series referred to by IDs. +// LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. 
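timeLimitAppender, completed above, is a plain decorator: embed the inner appender, veto timestamps beyond now plus maxAheadTime, and delegate everything else. A freestanding sketch with illustrative types rather than the storage.Appender interface:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTooFarInFuture = errors.New("timestamp is too far in the future")

type appender interface {
	Append(tMillis int64, v float64) error
}

// timeLimitAppender rejects samples stamped past maxTime, then delegates.
type timeLimitAppender struct {
	appender
	maxTime int64
}

func (a timeLimitAppender) Append(t int64, v float64) error {
	if t > a.maxTime {
		return fmt.Errorf("%w (t=%d)", errTooFarInFuture, t)
	}
	return a.appender.Append(t, v)
}

type printAppender struct{}

func (printAppender) Append(t int64, v float64) error {
	fmt.Println("appended", t, v)
	return nil
}

func main() {
	app := timeLimitAppender{printAppender{}, time.Now().Add(10 * time.Minute).UnixMilli()}
	fmt.Println(app.Append(time.Now().UnixMilli(), 1))                // ok
	fmt.Println(app.Append(time.Now().Add(time.Hour).UnixMilli(), 1)) // rejected
}
```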
-func (r blockIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) {
-	return r.ir.LabelNamesFor(ctx, ids...)
+func (r blockIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
+	return r.ir.LabelNamesFor(ctx, postings)
 }
 
 type blockTombstoneReader struct {
@@ -646,10 +646,10 @@ Outer:
 }
 
 // CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
-// If there was a rewrite, then it returns the ULID of the new block written, else nil.
-// If the resultant block is empty (tombstones covered the whole block), then it deletes the new block and return nil UID.
+// If there was a rewrite, then it returns the ULIDs of the new blocks written, else nil.
+// If a resultant block is empty (tombstones covered the whole block), then it returns an empty slice.
 // It returns a boolean indicating if the parent block can be deleted safely or not.
-func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, bool, error) {
+func (pb *Block) CleanTombstones(dest string, c Compactor) ([]ulid.ULID, bool, error) {
 	numStones := 0
 
 	if err := pb.tombstones.Iter(func(id storage.SeriesRef, ivs tombstones.Intervals) error {
@@ -664,12 +664,12 @@
 	}
 
 	meta := pb.Meta()
-	uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
+	uids, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
 	if err != nil {
 		return nil, false, err
 	}
 
-	return &uid, true, nil
+	return uids, true, nil
 }
 
 // Snapshot creates snapshot of the block into dir.
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
index 32346d69d00..232ec2b9148 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go
@@ -105,12 +105,17 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
 	if err != nil {
 		return ulid.ULID{}, fmt.Errorf("create leveled compactor: %w", err)
 	}
-	id, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
+	ids, err := compactor.Write(w.destinationDir, w.head, mint, maxt, nil)
 	if err != nil {
 		return ulid.ULID{}, fmt.Errorf("compactor write: %w", err)
 	}
 
-	return id, nil
+	// No block was produced. Callers are responsible for checking for an
+	// empty ulid.ULID based on their use case.
+	if len(ids) == 0 {
+		return ulid.ULID{}, nil
+	}
+	return ids[0], nil
 }
 
 func (w *BlockWriter) Close() error {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
index c2ae23b2e40..9ef42b339b7 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go
@@ -58,19 +58,23 @@ type Compactor interface {
 	// Results returned when compactions are in progress are undefined.
 	Plan(dir string) ([]string, error)
 
-	// Write persists a Block into a directory.
-	// No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}.
-	Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error)
+	// Write persists one or more Blocks into a directory.
+	// No Block is written when the resulting Block has 0 samples, and an empty slice is returned.
+	// Prometheus always returns one or no block.
The interface allows returning more than one + // block for downstream users to experiment with compactor. + Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) // Compact runs compaction against the provided directories. Must // only be called concurrently with results of Plan(). // Can optionally pass a list of already open blocks, // to avoid having to reopen them. - // When resulting Block has 0 samples + // Prometheus always return one or no block. The interface allows returning more than one + // block for downstream users to experiment with compactor. + // When one resulting Block has 0 samples // * No block is written. // * The source dirs are marked Deletable. - // * Returns empty ulid.ULID{}. - Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) + // * Block is not included in the result. + Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) } // LeveledCompactor implements the Compactor interface. @@ -441,11 +445,11 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. -func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) { return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{}) } -func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) { var ( blocks []BlockReader bs []*Block @@ -457,7 +461,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, for _, d := range dirs { meta, _, err := readMetaFile(d) if err != nil { - return uid, err + return nil, err } var b *Block @@ -475,7 +479,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, var err error b, err = OpenBlock(c.logger, d, c.chunkPool) if err != nil { - return uid, err + return nil, err } defer b.Close() } @@ -486,10 +490,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, uids = append(uids, meta.ULID.String()) } - uid = ulid.MustNew(ulid.Now(), rand.Reader) + uid := ulid.MustNew(ulid.Now(), rand.Reader) meta := CompactBlockMetas(uid, metas...) - err = c.write(dest, meta, blockPopulator, blocks...) + err := c.write(dest, meta, blockPopulator, blocks...) 
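Since Compactor.Write now returns a slice, callers that previously compared against the zero ULID must check for an empty result instead. A small compatibility helper, illustrative and not part of the diff, mirrors what BlockWriter.Flush does above:

```go
package main

import (
	"fmt"

	"github.com/oklog/ulid"
	"github.com/prometheus/prometheus/tsdb"
)

// writeOne preserves the old single-ULID contract on top of the new
// slice-returning Write. Hypothetical helper for callers to adapt.
func writeOne(c tsdb.Compactor, dest string, b tsdb.BlockReader, mint, maxt int64) (ulid.ULID, error) {
	uids, err := c.Write(dest, b, mint, maxt, nil)
	if err != nil {
		return ulid.ULID{}, err
	}
	if len(uids) == 0 {
		return ulid.ULID{}, nil // empty result block, nothing was written
	}
	return uids[0], nil
}

func main() { fmt.Println("sketch only") }
```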
if err == nil { if meta.Stats.NumSamples == 0 { for _, b := range bs { @@ -503,25 +507,25 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, } b.numBytesMeta = n } - uid = ulid.ULID{} level.Info(c.logger).Log( "msg", "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), ) - } else { - level.Info(c.logger).Log( - "msg", "compact blocks", - "count", len(blocks), - "mint", meta.MinTime, - "maxt", meta.MaxTime, - "ulid", meta.ULID, - "sources", fmt.Sprintf("%v", uids), - "duration", time.Since(start), - ) + return nil, nil } - return uid, nil + + level.Info(c.logger).Log( + "msg", "compact blocks", + "count", len(blocks), + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "ulid", meta.ULID, + "sources", fmt.Sprintf("%v", uids), + "duration", time.Since(start), + ) + return []ulid.ULID{uid}, nil } errs := tsdb_errors.NewMulti(err) @@ -533,10 +537,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, } } - return uid, errs.Err() + return nil, errs.Err() } -func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) { +func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) { start := time.Now() uid := ulid.MustNew(ulid.Now(), rand.Reader) @@ -560,7 +564,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b err := c.write(dest, meta, DefaultBlockPopulator{}, b) if err != nil { - return uid, err + return nil, err } if meta.Stats.NumSamples == 0 { @@ -570,7 +574,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b "maxt", meta.MaxTime, "duration", time.Since(start), ) - return ulid.ULID{}, nil + return nil, nil } level.Info(c.logger).Log( @@ -581,7 +585,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b "duration", time.Since(start), "ooo", meta.Compaction.FromOutOfOrder(), ) - return uid, nil + return []ulid.ULID{uid}, nil } // instrumentedChunkWriter is used for level 1 compactions to record statistics @@ -652,7 +656,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } closers = append(closers, indexw) - if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw); err != nil { + if err := blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, indexw, chunkw, AllSortedPostings); err != nil { return fmt.Errorf("populate block: %w", err) } @@ -718,7 +722,20 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blockPopulator Bl } type BlockPopulator interface { - PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) error +} + +// IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader. 
+type IndexReaderPostingsFunc func(ctx context.Context, reader IndexReader) index.Postings + +// AllSortedPostings returns a sorted all posting iterator from the input index reader. +func AllSortedPostings(ctx context.Context, reader IndexReader) index.Postings { + k, v := index.AllPostingsKey() + all, err := reader.Postings(ctx, k, v) + if err != nil { + return index.ErrPostings(err) + } + return reader.SortedPostings(all) } type DefaultBlockPopulator struct{} @@ -726,7 +743,7 @@ type DefaultBlockPopulator struct{} // PopulateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. // It expects sorted blocks input by mint. -func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block from no readers") } @@ -784,14 +801,9 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa } closers = append(closers, tombsr) - k, v := index.AllPostingsKey() - all, err := indexr.Postings(ctx, k, v) - if err != nil { - return err - } - all = indexr.SortedPostings(all) + postings := postingsFunc(ctx, indexr) // Blocks meta is half open: [min, max), so subtract 1 to ensure we don't hold samples with exact meta.MaxTime timestamp. - sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, all, meta.MinTime, meta.MaxTime-1, false)) + sets = append(sets, NewBlockChunkSeriesSet(b.Meta().ULID, indexr, chunkr, tombsr, postings, meta.MinTime, meta.MaxTime-1, false)) syms := indexr.Symbols() if i == 0 { symbols = syms diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 5651b403e5c..b2175d47582 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -192,12 +192,22 @@ type Options struct { // NewCompactorFunc is a function that returns a TSDB compactor. NewCompactorFunc NewCompactorFunc + + // BlockQuerierFunc is a function to return storage.Querier from a BlockReader. + BlockQuerierFunc BlockQuerierFunc + + // BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader. + BlockChunkQuerierFunc BlockChunkQuerierFunc } type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} +type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error) + +type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) + // DB handles reads and writes of time series falling into // a hashed partition of a seriedb. 
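AllSortedPostings is the default IndexReaderPostingsFunc; the hook exists so downstream compactors can feed PopulateBlock a subset of postings. A hypothetical alternative with the same shape, restricting input to a single metric name:

```go
package main

import (
	"context"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/index"
)

// upOnlyPostings mirrors AllSortedPostings but selects only series named
// "up". Hypothetical policy for downstream experiments; not in the diff.
func upOnlyPostings(ctx context.Context, r tsdb.IndexReader) index.Postings {
	p, err := r.Postings(ctx, "__name__", "up")
	if err != nil {
		return index.ErrPostings(err)
	}
	return r.SortedPostings(p)
}

func main() {
	var _ tsdb.IndexReaderPostingsFunc = upOnlyPostings
}
```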
type DB struct { @@ -244,6 +254,10 @@ type DB struct { writeNotified wlog.WriteNotified registerer prometheus.Registerer + + blockQuerierFunc BlockQuerierFunc + + blockChunkQuerierFunc BlockChunkQuerierFunc } type dbMetrics struct { @@ -559,10 +573,12 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue db.closers = append(db.closers, head) return &DB{ - dir: db.dir, - logger: db.logger, - blocks: blocks, - head: head, + dir: db.dir, + logger: db.logger, + blocks: blocks, + head: head, + blockQuerierFunc: NewBlockQuerier, + blockChunkQuerierFunc: NewBlockChunkQuerier, }, nil } @@ -870,6 +886,18 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } db.compactCancel = cancel + if opts.BlockQuerierFunc == nil { + db.blockQuerierFunc = NewBlockQuerier + } else { + db.blockQuerierFunc = opts.BlockQuerierFunc + } + + if opts.BlockChunkQuerierFunc == nil { + db.blockChunkQuerierFunc = NewBlockChunkQuerier + } else { + db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc + } + var wal, wbl *wlog.WL segmentSize := wlog.DefaultSegmentSize // Wal is enabled. @@ -1336,13 +1364,11 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { mint, maxt := t, t+blockSize // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. - uid, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) + uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) if err != nil { return nil, err } - if uid.Compare(ulid.ULID{}) != 0 { - ulids = append(ulids, uid) - } + ulids = append(ulids, uids...) } if len(ulids) == 0 { @@ -1364,19 +1390,19 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID // compactHead compacts the given RangeHead. // The compaction mutex should be held before calling this method. 
func (db *DB) compactHead(head *RangeHead) error { - uid, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) + uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil) if err != nil { return fmt.Errorf("persist head block: %w", err) } if err := db.reloadBlocks(); err != nil { - if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { - return tsdb_errors.NewMulti( - fmt.Errorf("reloadBlocks blocks: %w", err), - fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll), - ).Err() + multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err)) + for _, uid := range uids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll)) + } } - return fmt.Errorf("reloadBlocks blocks: %w", err) + return multiErr.Err() } if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { return fmt.Errorf("head memory truncate: %w", err) @@ -1411,16 +1437,19 @@ func (db *DB) compactBlocks() (err error) { default: } - uid, err := db.compactor.Compact(db.dir, plan, db.blocks) + uids, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { return fmt.Errorf("compact %s: %w", plan, err) } if err := db.reloadBlocks(); err != nil { - if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { - return fmt.Errorf("delete compacted block after failed db reloadBlocks:%s: %w", uid, err) + errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err)) + for _, uid := range uids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll)) + } } - return fmt.Errorf("reloadBlocks blocks: %w", err) + return errs.Err() } } @@ -1541,12 +1570,15 @@ func (db *DB) reloadBlocks() (err error) { oldBlocks := db.blocks db.blocks = toLoad - blockMetas := make([]BlockMeta, 0, len(toLoad)) - for _, b := range toLoad { - blockMetas = append(blockMetas, b.Meta()) - } - if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + // Only check overlapping blocks when overlapping compaction is enabled. + if db.opts.EnableOverlappingCompaction { + blockMetas := make([]BlockMeta, 0, len(toLoad)) + for _, b := range toLoad { + blockMetas = append(blockMetas, b.Meta()) + } + if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { + level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + } } // Append blocks to old, deletable blocks, so we can close them. 
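Because `Compactor.Write` and `Compactor.Compact` now return `[]ulid.ULID`, every failure path has to clean up all produced blocks and report all removal errors together, as `compactHead` and `compactBlocks` do above. A minimal sketch of that aggregation pattern, using the stdlib `errors.Join` as a stand-in for `tsdb_errors.NewMulti`; the helper name and `main` are illustrative, not part of the vendored code:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/oklog/ulid"
)

// cleanupAfterFailedReload mirrors the pattern above: when reloadBlocks
// fails, every block the compactor just wrote is removed, and the reload
// error plus any removal errors are reported together instead of stopping
// at the first failure.
func cleanupAfterFailedReload(dir string, uids []ulid.ULID, reloadErr error) error {
	errs := []error{fmt.Errorf("reloadBlocks blocks: %w", reloadErr)}
	for _, uid := range uids {
		if err := os.RemoveAll(filepath.Join(dir, uid.String())); err != nil {
			errs = append(errs, fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, err))
		}
	}
	return errors.Join(errs...)
}

func main() {
	// With no produced blocks, only the reload error is reported.
	fmt.Println(cleanupAfterFailedReload(os.TempDir(), nil, errors.New("reload failed")))
}
```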
@@ -1960,7 +1992,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt) if err != nil { return nil, fmt.Errorf("open block querier for head %s: %w", rh, err) } @@ -1977,7 +2009,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } @@ -1991,9 +2023,9 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) var err error - outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) + outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt) if err != nil { - // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. + // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead. rh.isoState.Close() return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) @@ -2003,7 +2035,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { } for _, b := range blocks { - q, err := NewBlockQuerier(b, mint, maxt) + q, err := db.blockQuerierFunc(b, mint, maxt) if err != nil { return nil, fmt.Errorf("open querier for block %s: %w", b, err) } @@ -2041,7 +2073,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) - inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } @@ -2058,7 +2090,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } @@ -2071,8 +2103,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) + outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { + // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. 
+ rh.isoState.Close() + return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) } @@ -2080,7 +2115,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } for _, b := range blocks { - q, err := NewBlockChunkQuerier(b, mint, maxt) + q, err := db.blockChunkQuerierFunc(b, mint, maxt) if err != nil { return nil, fmt.Errorf("open querier for block %s: %w", b, err) } @@ -2149,7 +2184,7 @@ func (db *DB) CleanTombstones() (err error) { cleanUpCompleted = true for _, pb := range db.Blocks() { - uid, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) + uids, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor) if cleanErr != nil { return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr) } @@ -2173,7 +2208,7 @@ func (db *DB) CleanTombstones() (err error) { } // Delete new block if it was created. - if uid != nil && *uid != (ulid.ULID{}) { + for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index d5f7144fdbc..5972a9c5d6a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -1552,7 +1552,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) + deleted, affected, chunksRemoved, actualInOrderMint, minOOOTime, minMmapFile := h.series.gc(mint, minOOOMmapRef) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1561,7 +1561,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { h.numSeries.Sub(uint64(seriesRemoved)) // Remove deleted series IDs from the postings lists. - h.postings.Delete(deleted) + h.postings.Delete(deleted, affected) // Remove tombstones referring to the deleted series. h.tombstones.DeleteTombstones(deleted) @@ -1869,9 +1869,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // and there's no easy way to cast maps. // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. 
-func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { +func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ map[labels.Label]struct{}, _ int, _, _ int64, minMmapFile int) { var ( deleted = map[storage.SeriesRef]struct{}{} + affected = map[labels.Label]struct{}{} rmChunks = 0 actualMint int64 = math.MaxInt64 minOOOTime int64 = math.MaxInt64 @@ -1927,6 +1928,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( } deleted[storage.SeriesRef(series.ref)] = struct{}{} + series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} }) s.hashes[hashShard].del(hash, series.ref) delete(s.series[refShard], series.ref) deletedForCallback[series.ref] = series.lset @@ -1938,7 +1940,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( actualMint = mint } - return deleted, rmChunks, actualMint, minOOOTime, minMmapFile + return deleted, affected, rmChunks, actualMint, minOOOTime, minMmapFile } // The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index df15abcd500..689972f1b79 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -267,22 +267,29 @@ func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef, return value, nil } -// LabelNamesFor returns all the label names for the series referred to by IDs. +// LabelNamesFor returns all the label names for the series referred to by the postings. // The names returned are sorted. -func (h *headIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postings) ([]string, error) { namesMap := make(map[string]struct{}) - for _, id := range ids { - if ctx.Err() != nil { + i := 0 + for series.Next() { + i++ + if i%checkContextEveryNIterations == 0 && ctx.Err() != nil { return nil, ctx.Err() } - memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) + memSeries := h.head.series.getByID(chunks.HeadSeriesRef(series.At())) if memSeries == nil { - return nil, storage.ErrNotFound + // Series not found, this happens during compaction, + // when series was garbage collected after the caller got the series IDs. + continue } memSeries.lset.Range(func(lbl labels.Label) { namesMap[lbl.Name] = struct{}{} }) } + if err := series.Err(); err != nil { + return nil, err + } names := make([]string, 0, len(namesMap)) for name := range namesMap { names = append(names, name) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 480e6a8fc7a..36210545983 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -1551,12 +1551,18 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe // LabelNamesFor returns all the label names for the series referred to by IDs. // The names returned are sorted. 
-func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string, error) { // Gather offsetsMap the name offsetsMap in the symbol table first offsetsMap := make(map[uint32]struct{}) - for _, id := range ids { - if ctx.Err() != nil { - return nil, ctx.Err() + i := 0 + for postings.Next() { + id := postings.At() + i++ + + if i%checkContextEveryNIterations == 0 { + if ctxErr := ctx.Err(); ctxErr != nil { + return nil, ctxErr + } } offset := id diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 159f6416e2e..d9b5b69de06 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -288,62 +288,34 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) { } // Delete removes all ids in the given map from the postings lists. -func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}) { - var keys, vals []string - - // Collect all keys relevant for deletion once. New keys added afterwards - // can by definition not be affected by any of the given deletes. - p.mtx.RLock() - for n := range p.m { - keys = append(keys, n) - } - p.mtx.RUnlock() - - for _, n := range keys { - p.mtx.RLock() - vals = vals[:0] - for v := range p.m[n] { - vals = append(vals, v) - } - p.mtx.RUnlock() - - // For each posting we first analyse whether the postings list is affected by the deletes. - // If yes, we actually reallocate a new postings list. - for _, l := range vals { - // Only lock for processing one postings list so we don't block reads for too long. - p.mtx.Lock() - - found := false - for _, id := range p.m[n][l] { - if _, ok := deleted[id]; ok { - found = true - break - } - } - if !found { - p.mtx.Unlock() - continue - } - repl := make([]storage.SeriesRef, 0, len(p.m[n][l])) +// affectedLabels contains all the labels that are affected by the deletion, there's no need to check other labels. +func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { + p.mtx.Lock() + defer p.mtx.Unlock() - for _, id := range p.m[n][l] { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) - } + process := func(l labels.Label) { + orig := p.m[l.Name][l.Value] + repl := make([]storage.SeriesRef, 0, len(orig)) + for _, id := range orig { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) } - if len(repl) > 0 { - p.m[n][l] = repl - } else { - delete(p.m[n], l) - } - p.mtx.Unlock() } - p.mtx.Lock() - if len(p.m[n]) == 0 { - delete(p.m, n) + if len(repl) > 0 { + p.m[l.Name][l.Value] = repl + } else { + delete(p.m[l.Name], l.Value) + // Delete the key if we removed all values. + if len(p.m[l.Name]) == 0 { + delete(p.m, l.Name) + } } - p.mtx.Unlock() } + + for l := range affected { + process(l) + } + process(allPostingsKey) } // Iter calls f for each postings list. It aborts if f returns an error and returns it. @@ -398,16 +370,62 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { } func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { + // We'll copy the values into a slice and then match over that, + // this way we don't need to hold the mutex while we're matching, + // which can be slow (seconds) if the match function is a huge regex. 
+ // Holding this lock prevents new series from being added (slows down the write path) + // and blocks the compaction process. + vals := p.labelValues(name) + for i, count := 0, 1; i < len(vals); count++ { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + return ErrPostings(ctx.Err()) + } + + if match(vals[i]) { + i++ + continue + } + + // Didn't match, bring the last value to this position, make the slice shorter and check again. + // The order of the slice doesn't matter as it comes from a map iteration. + vals[i], vals = vals[len(vals)-1], vals[:len(vals)-1] + } + + // If none matched (or this label had no values), no need to grab the lock again. + if len(vals) == 0 { + return EmptyPostings() + } + + // Now `vals` only contains the values that matched, get their postings. + its := make([]Postings, 0, len(vals)) p.mtx.RLock() + e := p.m[name] + for _, v := range vals { + if refs, ok := e[v]; ok { + // Some of the values may have been garbage-collected in the meantime this is fine, we'll just skip them. + // If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere + // because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels, + // because the series were deleted already. + its = append(its, NewListPostings(refs)) + } + } + // Let the mutex go before merging. + p.mtx.RUnlock() + + return Merge(ctx, its...) +} + +// labelValues returns a slice of label values for the given label name. +// It will take the read lock. +func (p *MemPostings) labelValues(name string) []string { + p.mtx.RLock() + defer p.mtx.RUnlock() e := p.m[name] if len(e) == 0 { - p.mtx.RUnlock() - return EmptyPostings() + return nil } - // Benchmarking shows that first copying the values into a slice and then matching over that is - // faster than matching over the map keys directly, at least on AMD64. vals := make([]string, 0, len(e)) for v, srs := range e { if len(srs) > 0 { @@ -415,21 +433,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, } } - var its []Postings - count := 1 - for _, v := range vals { - if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { - p.mtx.RUnlock() - return ErrPostings(ctx.Err()) - } - count++ - if match(v) { - its = append(its, NewListPostings(e[v])) - } - } - p.mtx.RUnlock() - - return Merge(ctx, its...) + return vals } // ExpandPostings returns the postings expanded as a slice. 
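The rewritten `PostingsForLabelMatching` above narrows the critical section: label values are snapshotted under the read lock, the potentially slow matcher runs with no lock held, and the lock is re-acquired only to fetch the postings that survived. A self-contained sketch of the same pattern, using a toy `miniIndex` type rather than the real `MemPostings`:

```go
package main

import (
	"fmt"
	"regexp"
	"sync"
)

// miniIndex is an illustrative stand-in for MemPostings:
// label value -> series refs.
type miniIndex struct {
	mtx sync.RWMutex
	m   map[string][]int
}

// refsMatching snapshots the keys under the read lock, matches lock-free,
// then re-takes the lock only to fetch the postings that still exist.
func (ix *miniIndex) refsMatching(match func(string) bool) [][]int {
	ix.mtx.RLock()
	vals := make([]string, 0, len(ix.m))
	for v := range ix.m {
		vals = append(vals, v)
	}
	ix.mtx.RUnlock()

	// No lock held here: writers are free to proceed while we match,
	// even if match is an expensive regex.
	matched := vals[:0]
	for _, v := range vals {
		if match(v) {
			matched = append(matched, v)
		}
	}

	ix.mtx.RLock()
	defer ix.mtx.RUnlock()
	out := make([][]int, 0, len(matched))
	for _, v := range matched {
		if refs, ok := ix.m[v]; ok { // value may have been deleted in between
			out = append(out, refs)
		}
	}
	return out
}

func main() {
	ix := &miniIndex{m: map[string][]int{"api": {1, 2}, "auth": {4}, "web": {3}}}
	re := regexp.MustCompile(`^a`)
	fmt.Println(ix.refsMatching(re.MatchString)) // postings for "api" and "auth"
}
```

Because values can be garbage-collected between the two lock acquisitions, the second pass must tolerate missing keys, exactly as the vendored code does.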
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index af431d678f7..3b5adf80c92 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -483,7 +483,7 @@ func (ir *OOOCompactionHeadIndexReader) LabelValueFor(context.Context, storage.S return "", errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 1071c4a716d..fb4a87cc8c0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -447,16 +447,7 @@ func labelNamesWithMatchers(ctx context.Context, r IndexReader, matchers ...*lab if err != nil { return nil, err } - - var postings []storage.SeriesRef - for p.Next() { - postings = append(postings, p.At()) - } - if err := p.Err(); err != nil { - return nil, fmt.Errorf("postings for label names with matchers: %w", err) - } - - return r.LabelNamesFor(ctx, postings...) + return r.LabelNamesFor(ctx, p) } // seriesData, used inside other iterators, are updated when we move from one series to another. diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index f0884926e11..b95ff25cf9d 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -1761,7 +1761,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling response", "err", err) + level.Error(api.logger).Log("msg", "error marshaling response", "url", req.URL, "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1769,7 +1769,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + level.Error(api.logger).Log("msg", "error writing response", "url", req.URL, "bytesWritten", n, "err", err) } } diff --git a/vendor/github.com/thanos-io/objstore/.go-version b/vendor/github.com/thanos-io/objstore/.go-version index d8c94e3e98c..dbfae7a0293 100644 --- a/vendor/github.com/thanos-io/objstore/.go-version +++ b/vendor/github.com/thanos-io/objstore/.go-version @@ -1 +1 @@ -1.20.x +1.21.x diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index 5889578b2ee..3deec796d49 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -11,6 +11,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased ### Fixed +- [#117](https://github.com/thanos-io/objstore/pull/117) Metrics: Fix `objstore_bucket_operation_failures_total` incorrectly incremented if context is cancelled while reading object contents. 
+- [#115](https://github.com/thanos-io/objstore/pull/115) GCS: Fix creation of bucket with GRPC connections. Also update storage client to `v1.40.0`. - [#102](https://github.com/thanos-io/objstore/pull/102) Azure: bump azblob sdk to get concurrency fixes. - [#33](https://github.com/thanos-io/objstore/pull/33) Tracing: Add `ContextWithTracer()` to inject the tracer into the context. - [#34](https://github.com/thanos-io/objstore/pull/34) Fix ignored options when creating shared credential Azure client. @@ -43,6 +45,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#86](https://github.com/thanos-io/objstore/pull/86) GCS: Add HTTP Config to GCS - [#99](https://github.com/thanos-io/objstore/pull/99) Swift: Add HTTP_Config - [#108](https://github.com/thanos-io/objstore/pull/108) Metrics: Add native histogram definitions to histograms +- [#112](https://github.com/thanos-io/objstore/pull/112) S3: Add `DisableDualstack` option. +- [#100](https://github.com/thanos-io/objstore/pull/100) S3: Add `DisableMultipart` option. ### Changed - [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`. diff --git a/vendor/github.com/thanos-io/objstore/README.md b/vendor/github.com/thanos-io/objstore/README.md index 6ebdffc1795..6d848e79746 100644 --- a/vendor/github.com/thanos-io/objstore/README.md +++ b/vendor/github.com/thanos-io/objstore/README.md @@ -145,7 +145,7 @@ Thanos uses the [minio client](https://github.com/minio/minio-go) library to upl > NOTE: S3 client was designed for AWS S3, but it can be configured against other S3-compatible object storages e.g Ceph -The S# object storage yaml configuration definition: +The S3 object storage yaml configuration definition: ```yaml mdox-exec="go run scripts/cfggen/main.go --name=s3.Config" type: S3 @@ -153,6 +153,7 @@ config: bucket: "" endpoint: "" region: "" + disable_dualstack: false aws_sdk_auth: false access_key: "" insecure: false @@ -181,6 +182,7 @@ config: list_objects_version: "" bucket_lookup_type: auto send_content_md5: true + disable_multipart: false part_size: 67108864 sse_config: type: "" @@ -199,6 +201,8 @@ The field `prefix` can be used to transparently use prefixes in your S3 bucket. The AWS region to endpoint mapping can be found in this [link](https://docs.aws.amazon.com/general/latest/gr/s3.html). +By default, the library prefers using [dual-stack endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html). You can explicitly disable this behaviour by setting `disable_dualstack: true`. + Make sure you use a correct signature version. Currently AWS requires signature v4, so it needs `signature_version2: false`. If you don't specify it, you will get an `Access Denied` error. On the other hand, several S3 compatible APIs use `signature_version2: true`. You can configure the timeout settings for the HTTP client by setting the `http_config.idle_conn_timeout` and `http_config.response_header_timeout` keys. As a rule of thumb, if you are seeing errors like `timeout awaiting response headers` in your logs, you may want to increase the value of `http_config.response_header_timeout`.
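Combining the two new S3 options, a client could be configured as sketched below, assuming the vendored API shown in this diff (`s3.DefaultConfig` and `NewBucketWithConfig(logger, config, component)`); the bucket, endpoint, and region values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/thanos-io/objstore/providers/s3"
)

func main() {
	cfg := s3.DefaultConfig
	cfg.Bucket = "example-bucket"               // placeholder
	cfg.Endpoint = "s3.us-east-1.amazonaws.com" // placeholder
	cfg.Region = "us-east-1"                    // placeholder
	cfg.DisableDualstack = true // opt out of the default dual-stack endpoints
	cfg.DisableMultipart = true // upload every object in a single PUT

	bkt, err := s3.NewBucketWithConfig(log.NewNopLogger(), cfg, "example")
	if err != nil {
		panic(err)
	}
	fmt.Println("bucket client ready:", bkt.Name())
}
```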
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index c913068711b..31c167ebd77 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -756,7 +756,7 @@ func (r *timingReader) Read(b []byte) (n int, err error) { r.readBytes += int64(n) // Report metric just once. if !r.alreadyGotErr && err != nil && err != io.EOF { - if !r.isFailureExpected(err) { + if !r.isFailureExpected(err) && !errors.Is(err, context.Canceled) { r.failed.WithLabelValues(r.op).Inc() } r.alreadyGotErr = true diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index 75c6130374d..c4743630505 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -24,6 +24,7 @@ import ( htransport "google.golang.org/api/transport/http" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/experimental" "google.golang.org/grpc/status" "gopkg.in/yaml.v2" @@ -100,6 +101,18 @@ func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, comp option.WithUserAgent(fmt.Sprintf("thanos-%s/%s (%s)", component, version.Version, runtime.Version())), ) + if !gc.UseGRPC { + var err error + opts, err = appendHttpOptions(gc, opts) + if err != nil { + return nil, err + } + } + + return newBucket(ctx, logger, gc, opts) +} + +func appendHttpOptions(gc Config, opts []option.ClientOption) ([]option.ClientOption, error) { // Check if a roundtripper has been set in the config // otherwise build the default transport. var rt http.RoundTripper @@ -126,9 +139,7 @@ func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, comp Transport: gRT, Timeout: time.Duration(gc.HTTPConfig.IdleConnTimeout), } - opts = append(opts, option.WithHTTPClient(httpCli)) - - return newBucket(ctx, logger, gc, opts) + return append(opts, option.WithHTTPClient(httpCli)), nil } func newBucket(ctx context.Context, logger log.Logger, gc Config, opts []option.ClientOption) (*Bucket, error) { @@ -138,7 +149,7 @@ func newBucket(ctx context.Context, logger log.Logger, gc Config, opts []option. ) if gc.UseGRPC { opts = append(opts, - option.WithGRPCDialOption(grpc.WithRecvBufferPool(grpc.NewSharedBufferPool())), + option.WithGRPCDialOption(experimental.WithRecvBufferPool(grpc.NewSharedBufferPool())), option.WithGRPCConnectionPool(gc.GRPCConnPoolSize), ) gcsClient, err = storage.NewGRPCClient(ctx, opts...) diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index 721300349e1..dad89e6698e 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -101,6 +101,7 @@ const ( var DefaultConfig = Config{ PutUserMetadata: map[string]string{}, HTTPConfig: exthttp.DefaultHTTPConfig, + DisableMultipart: false, PartSize: 1024 * 1024 * 64, // 64MB. BucketLookupType: AutoLookup, SendContentMd5: true, // Default to using MD5. 
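The `timingReader` fix above means a read error increments `objstore_bucket_operation_failures_total` only when it is neither `io.EOF`, an expected failure, nor a context cancellation. That guard reduces to a small predicate; a sketch under those assumptions (`shouldCountFailure` is a hypothetical helper, not the vendored code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// shouldCountFailure mirrors the guard in timingReader.Read: EOF terminates
// reads normally, expected failures are exempted by the caller, and a
// cancelled context is the client's choice, not a bucket failure.
func shouldCountFailure(err error, isFailureExpected func(error) bool) bool {
	return err != nil && err != io.EOF &&
		!isFailureExpected(err) && !errors.Is(err, context.Canceled)
}

func main() {
	never := func(error) bool { return false }
	fmt.Println(shouldCountFailure(context.Canceled, never))    // false: cancelled by caller
	fmt.Println(shouldCountFailure(io.ErrUnexpectedEOF, never)) // true: genuine failure
	fmt.Println(shouldCountFailure(io.EOF, never))              // false: normal end of object
}
```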
@@ -116,6 +117,7 @@ type Config struct { Bucket string `yaml:"bucket"` Endpoint string `yaml:"endpoint"` Region string `yaml:"region"` + DisableDualstack bool `yaml:"disable_dualstack"` AWSSDKAuth bool `yaml:"aws_sdk_auth"` AccessKey string `yaml:"access_key"` Insecure bool `yaml:"insecure"` @@ -128,6 +130,7 @@ type Config struct { ListObjectsVersion string `yaml:"list_objects_version"` BucketLookupType BucketLookupType `yaml:"bucket_lookup_type"` SendContentMd5 bool `yaml:"send_content_md5"` + DisableMultipart bool `yaml:"disable_multipart"` // PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize. // NOTE we need to make sure this number does not produce more parts than 10 000. PartSize uint64 `yaml:"part_size"` @@ -150,15 +153,16 @@ type TraceConfig struct { // Bucket implements the store.Bucket interface against s3-compatible APIs. type Bucket struct { - logger log.Logger - name string - client *minio.Client - defaultSSE encrypt.ServerSide - putUserMetadata map[string]string - storageClass string - partSize uint64 - listObjectsV1 bool - sendContentMd5 bool + logger log.Logger + name string + client *minio.Client + defaultSSE encrypt.ServerSide + putUserMetadata map[string]string + storageClass string + disableMultipart bool + partSize uint64 + listObjectsV1 bool + sendContentMd5 bool } // parseConfig unmarshals a buffer into a Config with default values. @@ -299,6 +303,11 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B } } + if config.DisableDualstack { + // The value in the config is inverted for backward compatibility + client.SetS3EnableDualstack(false) + } + if config.TraceConfig.Enable { logWriter := log.NewStdlibAdapter(level.Debug(logger), log.MessageKey("s3TraceMsg")) client.TraceOn(logWriter) @@ -319,15 +328,16 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B } bkt := &Bucket{ - logger: logger, - name: config.Bucket, - client: client, - defaultSSE: sse, - putUserMetadata: config.PutUserMetadata, - storageClass: storageClass, - partSize: config.PartSize, - listObjectsV1: config.ListObjectsVersion == "v1", - sendContentMd5: config.SendContentMd5, + logger: logger, + name: config.Bucket, + client: client, + defaultSSE: sse, + putUserMetadata: config.PutUserMetadata, + storageClass: storageClass, + disableMultipart: config.DisableMultipart, + partSize: config.PartSize, + listObjectsV1: config.ListObjectsVersion == "v1", + sendContentMd5: config.SendContentMd5, } return bkt, nil } @@ -500,6 +510,7 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { r, size, minio.PutObjectOptions{ + DisableMultipart: b.disableMultipart, PartSize: partSize, ServerSideEncryption: sse, UserMetadata: userMetadata, diff --git a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go index 1198ad79034..dc94938d752 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go +++ b/vendor/github.com/thanos-io/thanos/pkg/clientconfig/http.go @@ -162,21 +162,28 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig // If an authorization_credentials is provided, create a round tripper that will set the // Authorization header correctly on each request. 
if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 { - rt = config_util.NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt) + rt = config_util.NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, config_util.NewInlineSecret(string(cfg.Authorization.Credentials)), rt) } else if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 { - rt = config_util.NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt) + rt = config_util.NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, config_util.NewFileSecret(cfg.Authorization.CredentialsFile), rt) } // Backwards compatibility, be nice with importers who would not have // called Validate(). if len(cfg.BearerToken) > 0 { - rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", cfg.BearerToken, rt) + rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", config_util.NewInlineSecret(string(cfg.BearerToken)), rt) } else if len(cfg.BearerTokenFile) > 0 { - rt = config_util.NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.BearerTokenFile, rt) + rt = config_util.NewAuthorizationCredentialsRoundTripper("Bearer", config_util.NewFileSecret(cfg.BearerTokenFile), rt) } if cfg.BasicAuth != nil { // TODO(yeya24): expose UsernameFile as a config. - rt = config_util.NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, "", cfg.BasicAuth.PasswordFile, rt) + username := config_util.NewInlineSecret(cfg.BasicAuth.Username) + var password config_util.SecretReader + if len(cfg.BasicAuth.PasswordFile) > 0 { + password = config_util.NewFileSecret(cfg.BasicAuth.PasswordFile) + } else { + password = config_util.NewInlineSecret(string(cfg.BasicAuth.Password)) + } + rt = config_util.NewBasicAuthRoundTripper(username, password, rt) } // Return a new configured RoundTripper. return rt, nil @@ -193,9 +200,9 @@ func NewRoundTripperFromConfig(cfg config_util.HTTPClientConfig, transportConfig } return config_util.NewTLSRoundTripper(tlsConfig, config_util.TLSRoundTripperSettings{ - CAFile: cfg.TLSConfig.CAFile, - CertFile: cfg.TLSConfig.CertFile, - KeyFile: cfg.TLSConfig.KeyFile, + CA: config_util.NewFileSecret(cfg.TLSConfig.CAFile), + Cert: config_util.NewFileSecret(cfg.TLSConfig.CertFile), + Key: config_util.NewFileSecret(cfg.TLSConfig.KeyFile), }, newRT) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 6b2b73aa462..522e4c9d4c0 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -851,17 +851,19 @@ type Compactor interface { // only be called concurrently with results of Plan(). // Can optionally pass a list of already open blocks, // to avoid having to reopen them. - // When resulting Block has 0 samples + // Prometheus always returns one block or none. The interface allows returning more than one + // block so that downstream users can experiment with the compactor. + // When a resulting block has 0 samples // * No block is written. // * The source dirs are marked Deletable. - // * Returns empty ulid.ULID{}. - Compact(dest string, dirs []string, open []*tsdb.Block) (ulid.ULID, error) - CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) (ulid.ULID, error) + // * The block is not included in the result.
+ Compact(dest string, dirs []string, open []*tsdb.Block) ([]ulid.ULID, error) + CompactWithBlockPopulator(dest string, dirs []string, open []*tsdb.Block, blockPopulator tsdb.BlockPopulator) ([]ulid.ULID, error) } // Compact plans and runs a single compaction against the group. The compacted result // is uploaded into the bucket the blocks were retrieved from. -func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor, blockDeletableChecker BlockDeletableChecker, compactionLifecycleCallback CompactionLifecycleCallback) (shouldRerun bool, compID ulid.ULID, rerr error) { +func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp Compactor, blockDeletableChecker BlockDeletableChecker, compactionLifecycleCallback CompactionLifecycleCallback) (shouldRerun bool, compIDs []ulid.ULID, rerr error) { cg.compactionRunsStarted.Inc() subDir := filepath.Join(dir, cg.Key()) @@ -878,7 +880,7 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp }() if err := os.MkdirAll(subDir, 0750); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "create compaction group dir") + return false, nil, errors.Wrap(err, "create compaction group dir") } defer func() { @@ -898,17 +900,17 @@ func (cg *Group) Compact(ctx context.Context, dir string, planner Planner, comp errChan := make(chan error, 1) err := tracing.DoInSpanWithErr(ctx, "compaction_group", func(ctx context.Context) (err error) { - shouldRerun, compID, err = cg.compact(ctx, subDir, planner, comp, blockDeletableChecker, compactionLifecycleCallback, errChan) + shouldRerun, compIDs, err = cg.compact(ctx, subDir, planner, comp, blockDeletableChecker, compactionLifecycleCallback, errChan) return err }, opentracing.Tags{"group.key": cg.Key()}) errChan <- err close(errChan) if err != nil { cg.compactionFailures.Inc() - return false, ulid.ULID{}, err + return false, nil, err } cg.compactionRunsCompleted.Inc() - return shouldRerun, compID, nil + return shouldRerun, compIDs, nil } // Issue347Error is a type wrapper for errors that should invoke repair process for broken block. @@ -1114,7 +1116,7 @@ func RepairIssue347(ctx context.Context, logger log.Logger, bkt objstore.Bucket, return nil } -func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor, blockDeletableChecker BlockDeletableChecker, compactionLifecycleCallback CompactionLifecycleCallback, errChan chan error) (shouldRerun bool, compID ulid.ULID, _ error) { +func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp Compactor, blockDeletableChecker BlockDeletableChecker, compactionLifecycleCallback CompactionLifecycleCallback, errChan chan error) (bool, []ulid.ULID, error) { cg.mtx.Lock() defer cg.mtx.Unlock() @@ -1124,7 +1126,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp // TODO(bwplotka): It would really nice if we could still check for other overlaps than replica. In fact this should be checked // in syncer itself. Otherwise with vertical compaction enabled we will sacrifice this important check. 
if !cg.enableVerticalCompaction { - return false, ulid.ULID{}, halt(errors.Wrap(err, "pre compaction overlap check")) + return false, nil, halt(errors.Wrap(err, "pre compaction overlap check")) } overlappingBlocks = true @@ -1135,11 +1137,11 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp toCompact, e = planner.Plan(ctx, cg.metasByMinTime, errChan, cg.extensions) return e }); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "plan compaction") + return false, nil, errors.Wrap(err, "plan compaction") } if len(toCompact) == 0 { // Nothing to do. - return false, ulid.ULID{}, nil + return false, nil, nil } level.Info(cg.logger).Log("msg", "compaction available and planned", "plan", fmt.Sprintf("%v", toCompact)) @@ -1149,7 +1151,7 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp begin := groupCompactionBegin if err := compactionLifecycleCallback.PreCompactionCallback(ctx, cg.logger, cg, toCompact); err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "failed to run pre compaction callback for plan: %s", fmt.Sprintf("%v", toCompact)) + return false, nil, errors.Wrapf(err, "failed to run pre compaction callback for plan: %s", fmt.Sprintf("%v", toCompact)) } level.Info(cg.logger).Log("msg", "finished running pre compaction callback; downloading blocks", "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "plan", fmt.Sprintf("%v", toCompact)) @@ -1206,25 +1208,26 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp sourceBlockStr := fmt.Sprintf("%v", toCompactDirs) if err := g.Wait(); err != nil { - return false, ulid.ULID{}, err + return false, nil, err } level.Info(cg.logger).Log("msg", "downloaded and verified blocks; compacting blocks", "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "plan", sourceBlockStr) begin = time.Now() + var compIDs []ulid.ULID if err := tracing.DoInSpanWithErr(ctx, "compaction", func(ctx context.Context) (e error) { populateBlockFunc, e := compactionLifecycleCallback.GetBlockPopulator(ctx, cg.logger, cg) if e != nil { return e } - compID, e = comp.CompactWithBlockPopulator(dir, toCompactDirs, nil, populateBlockFunc) + compIDs, e = comp.CompactWithBlockPopulator(dir, toCompactDirs, nil, populateBlockFunc) return e }); err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) + return false, nil, halt(errors.Wrapf(err, "compact blocks %v", toCompactDirs)) } - if compID == (ulid.ULID{}) { - // Prometheus compactor found that the compacted block would have no samples. - level.Info(cg.logger).Log("msg", "compacted block would have no samples, deleting source blocks", "blocks", sourceBlockStr) + if len(compIDs) == 0 { + // An empty result means every block the compaction produced had no samples. + level.Info(cg.logger).Log("msg", "no compacted blocks, deleting source blocks", "blocks", sourceBlockStr) for _, meta := range toCompact { if meta.Stats.NumSamples == 0 { if err := cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String()), blockDeletableChecker); err != nil { @@ -1232,99 +1235,103 @@ func (cg *Group) compact(ctx context.Context, dir string, planner Planner, comp } } } - // Even though this block was empty, there may be more work to do. - return true, ulid.ULID{}, nil + // Even though no blocks were compacted, there may be more work to do.
+ return true, nil, nil } cg.compactions.Inc() if overlappingBlocks { cg.verticalCompactions.Inc() } - level.Info(cg.logger).Log("msg", "compacted blocks", "new", compID, + compIDStrings := make([]string, 0, len(compIDs)) + for _, compID := range compIDs { + compIDStrings = append(compIDStrings, compID.String()) + } + compIDStrs := fmt.Sprintf("%v", compIDStrings) + level.Info(cg.logger).Log("msg", "compacted blocks", "new", compIDStrs, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds(), "overlapping_blocks", overlappingBlocks, "blocks", sourceBlockStr) - bdir := filepath.Join(dir, compID.String()) - index := filepath.Join(bdir, block.IndexFilename) - - if err := os.Remove(filepath.Join(bdir, "tombstones")); err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "remove tombstones") - } + for _, compID := range compIDs { + bdir := filepath.Join(dir, compID.String()) + index := filepath.Join(bdir, block.IndexFilename) - newMeta, err := metadata.ReadFromDir(bdir) - if err != nil { - return false, ulid.ULID{}, errors.Wrap(err, "read new meta") - } + if err := os.Remove(filepath.Join(bdir, "tombstones")); err != nil { + return false, nil, errors.Wrap(err, "remove tombstones") + } - var stats block.HealthStats - // Ensure the output block is valid. - err = tracing.DoInSpanWithErr(ctx, "compaction_verify_index", func(ctx context.Context) error { - stats, err = block.GatherIndexHealthStats(ctx, cg.logger, index, newMeta.MinTime, newMeta.MaxTime) + newMeta, err := metadata.ReadFromDir(bdir) if err != nil { - return err + return false, nil, errors.Wrap(err, "read new meta") } - return stats.AnyErr() - }) - if !cg.acceptMalformedIndex && err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "invalid result block %s", bdir)) - } - thanosMeta := metadata.Thanos{ - Labels: cg.labels.Map(), - Downsample: metadata.ThanosDownsample{Resolution: cg.resolution}, - Source: metadata.CompactorSource, - SegmentFiles: block.GetSegmentFiles(bdir), - Extensions: cg.extensions, - } - if stats.ChunkMaxSize > 0 { - thanosMeta.IndexStats.ChunkMaxSize = stats.ChunkMaxSize - } - if stats.SeriesMaxSize > 0 { - thanosMeta.IndexStats.SeriesMaxSize = stats.SeriesMaxSize - } - newMeta, err = metadata.InjectThanos(cg.logger, bdir, thanosMeta, nil) - if err != nil { - return false, ulid.ULID{}, errors.Wrapf(err, "failed to finalize the block %s", bdir) - } + var stats block.HealthStats + // Ensure the output block is valid. + err = tracing.DoInSpanWithErr(ctx, "compaction_verify_index", func(ctx context.Context) error { + stats, err = block.GatherIndexHealthStats(ctx, cg.logger, index, newMeta.MinTime, newMeta.MaxTime) + if err != nil { + return err + } + return stats.AnyErr() + }) + if !cg.acceptMalformedIndex && err != nil { + return false, nil, halt(errors.Wrapf(err, "invalid result block %s", bdir)) + } - // Ensure the output block is not overlapping with anything else, - // unless vertical compaction is enabled. 
- if !cg.enableVerticalCompaction { - if err := cg.areBlocksOverlapping(newMeta, toCompact...); err != nil { - return false, ulid.ULID{}, halt(errors.Wrapf(err, "resulted compacted block %s overlaps with something", bdir)) + thanosMeta := metadata.Thanos{ + Labels: cg.labels.Map(), + Downsample: metadata.ThanosDownsample{Resolution: cg.resolution}, + Source: metadata.CompactorSource, + SegmentFiles: block.GetSegmentFiles(bdir), + Extensions: cg.extensions, + } + if stats.ChunkMaxSize > 0 { + thanosMeta.IndexStats.ChunkMaxSize = stats.ChunkMaxSize + } + if stats.SeriesMaxSize > 0 { + thanosMeta.IndexStats.SeriesMaxSize = stats.SeriesMaxSize + } + newMeta, err = metadata.InjectThanos(cg.logger, bdir, thanosMeta, nil) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to finalize the block %s", bdir) + } + // Ensure the output block is not overlapping with anything else, + // unless vertical compaction is enabled. + if !cg.enableVerticalCompaction { + if err := cg.areBlocksOverlapping(newMeta, toCompact...); err != nil { + return false, nil, halt(errors.Wrapf(err, "resulted compacted block %s overlaps with something", bdir)) + } } - } - begin = time.Now() + begin = time.Now() - err = tracing.DoInSpanWithErr(ctx, "compaction_block_upload", func(ctx context.Context) error { - return block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc, objstore.WithUploadConcurrency(cg.blockFilesConcurrency)) - }) - if err != nil { - return false, ulid.ULID{}, retry(errors.Wrapf(err, "upload of %s failed", compID)) + err = tracing.DoInSpanWithErr(ctx, "compaction_block_upload", func(ctx context.Context) error { + return block.Upload(ctx, cg.logger, cg.bkt, bdir, cg.hashFunc, objstore.WithUploadConcurrency(cg.blockFilesConcurrency)) + }) + if err != nil { + return false, nil, retry(errors.Wrapf(err, "upload of %s failed", compID)) + } + level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds()) + level.Info(cg.logger).Log("msg", "running post compaction callback", "result_block", compID) + if err := compactionLifecycleCallback.PostCompactionCallback(ctx, cg.logger, cg, compID); err != nil { + return false, nil, retry(errors.Wrapf(err, "failed to run post compaction callback for result block %s", compID)) + } + level.Info(cg.logger).Log("msg", "finished running post compaction callback", "result_block", compID) } - level.Info(cg.logger).Log("msg", "uploaded block", "result_block", compID, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds()) // Mark for deletion the blocks we just compacted from the group and bucket so they do not get included // into the next planning cycle. // Eventually the block we just uploaded should get synced into the group again (including sync-delay). 
for _, meta := range toCompact { - err = tracing.DoInSpanWithErr(ctx, "compaction_block_delete", func(ctx context.Context) error { + if err := tracing.DoInSpanWithErr(ctx, "compaction_block_delete", func(ctx context.Context) error { return cg.deleteBlock(meta.ULID, filepath.Join(dir, meta.ULID.String()), blockDeletableChecker) - }, opentracing.Tags{"block.id": meta.ULID}) - if err != nil { - return false, ulid.ULID{}, retry(errors.Wrapf(err, "mark old block for deletion from bucket")) + }, opentracing.Tags{"block.id": meta.ULID}); err != nil { + return false, nil, retry(errors.Wrapf(err, "mark old block for deletion from bucket")) } cg.groupGarbageCollectedBlocks.Inc() } - level.Info(cg.logger).Log("msg", "running post compaction callback", "result_block", compID) - if err := compactionLifecycleCallback.PostCompactionCallback(ctx, cg.logger, cg, compID); err != nil { - return false, ulid.ULID{}, retry(errors.Wrapf(err, "failed to run post compaction callback for result block %s", compID)) - } - level.Info(cg.logger).Log("msg", "finished running post compaction callback", "result_block", compID) - level.Info(cg.logger).Log("msg", "finished compacting blocks", "duration", time.Since(groupCompactionBegin), - "duration_ms", time.Since(groupCompactionBegin).Milliseconds(), "result_block", compID, "source_blocks", sourceBlockStr) - return true, compID, nil + "duration_ms", time.Since(groupCompactionBegin).Milliseconds(), "result_blocks", compIDStrs, "source_blocks", sourceBlockStr) + return true, compIDs, nil } func (cg *Group) deleteBlock(id ulid.ULID, bdir string, blockDeletableChecker BlockDeletableChecker) error { diff --git a/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/idgenerator.go b/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/idgenerator.go index e59d1328667..dd47b1d92ec 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/idgenerator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/idgenerator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package xray // import "go.opentelemetry.io/contrib/propagators/aws/xray" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/propagator.go index 3eb98eaedd7..82165abc8be 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/propagator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/aws/xray/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package xray // import "go.opentelemetry.io/contrib/propagators/aws/xray" diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go new file mode 100644 index 00000000000..de7f13a2210 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/experimental.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package experimental is a collection of experimental features that might +// have some rough edges to them. Housing experimental features in this package +// results in a user accessing these APIs as `experimental.Foo`, thereby making +// it explicit that the feature is experimental and using them in production +// code is at their own risk. +// +// All APIs in this package are experimental. +package experimental + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/internal" +) + +// WithRecvBufferPool returns a grpc.DialOption that configures the use of +// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: WithStatsHandler, EnableTracing, or binary +// logging. In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. +func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption { + return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool) +} + +// RecvBufferPool returns a grpc.ServerOption that configures the server to use +// the provided shared buffer pool for parsing incoming messages. Depending on +// the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize +// one, begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the +// following options are used: StatsHandler, EnableTracing, or binary logging. +// In such cases, the shared buffer pool will be ignored. +// +// Note: It is not recommended to use the shared buffer pool when compression is +// enabled. 
+func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption { + return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371a6..bb2966e3b4c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bce3..24bc98ac422 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031e1..ea1d3e65a57 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) 
line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786d0..099b2bf451b 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d96719829c2..c2d6bd5265d 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. -func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea328..df53ff40b25 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -383,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cdf80..8a57d60b08c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -534,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go 
b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb487..e56c91a8dbe 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef4e..ba83fea44c3 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a11987b..f30ab6b586f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index 
a371f98de14..5d5771c2ed5 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -22,13 +22,13 @@ type Export struct{}

 // NewError formats a string according to the format specifier and arguments and
 // returns an error that has a "proto" prefix.
-func (Export) NewError(f string, x ...interface{}) error {
+func (Export) NewError(f string, x ...any) error {
 	return errors.New(f, x...)
 }

 // enum is any enum type generated by protoc-gen-go
 // and must be a named int32 type.
-type enum = interface{}
+type enum = any

 // EnumOf returns the protoreflect.Enum interface over e.
 // It returns nil if e is nil.
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu

 // message is any message type generated by protoc-gen-go
 // and must be a pointer to a named struct type.
-type message = interface{}
+type message = any

 // legacyMessageWrapper wraps a v2 message as a v1 message.
 type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index bff041edc94..f29e6a8fa88 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
 	}
 	for _, x := range *ext {
 		ei := getExtensionFieldInfo(x.Type())
-		if ei.funcs.isInit == nil {
+		if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
 			continue
 		}
 		v := x.Value()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 2b8f122c27b..4bb0a7a20ce 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
 	return false
 }

+// isUnexpandedLazy returns true if the ExtensionField is lazy and not
+// yet expanded, which means it's present and already checked for
+// initialized required fields.
+func (f *ExtensionField) isUnexpandedLazy() bool {
+	return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
+
+// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
+//
+// The returned buffer has to be kept over whatever operation we're planning,
+// as re-retrieving it will fail after the message is lazily decoded.
+func (f *ExtensionField) lazyBuffer() []byte {
+	// This function might be in the critical path, so check the atomic without
+	// taking a lock first, then only take the lock if needed.
+	if !f.isUnexpandedLazy() {
+		return nil
+	}
+	f.lazy.mu.Lock()
+	defer f.lazy.mu.Unlock()
+	return f.lazy.b
+}
+
 func (f *ExtensionField) lazyInit() {
 	f.lazy.mu.Lock()
 	defer f.lazy.mu.Unlock()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index b7a23faf1e4..7a16ec13dd1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
 		}
 		num, _ := protowire.DecodeTag(xi.wiretag)
 		size += messageset.SizeField(num)
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension, instead use the buffer to calculate size
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				// Don't count the tag size in the extension buffer, it's already added.
+				size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
+				continue
+			}
+		}
 		size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
 	}
@@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
 	xi := getExtensionFieldInfo(x.Type())
 	num, _ := protowire.DecodeTag(xi.wiretag)
 	b = messageset.AppendFieldStart(b, num)
+
+	if fullyLazyExtensions(opts) {
+		// Don't expand the extension if it's still in wire format, instead use the buffer content.
+		if lb := x.lazyBuffer(); lb != nil {
+			// The tag inside the lazy buffer is a different tag (the extension
+			// number), but what we need here is the tag for FieldMessage:
+			b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
+			b = append(b, lb[xi.tagsize:]...)
+			b = messageset.AppendFieldEnd(b)
+			return b, nil
+		}
+	}
+
 	b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
 	if err != nil {
 		return b, err
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2efa5b..e06ece55a26 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@ import (
 // unwrapper unwraps the value to the underlying value.
 // This is implemented by List and Map.
 type unwrapper interface {
-	protoUnwrap() interface{}
+	protoUnwrap() any
 }

 // A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
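Reviewer note: the codec_extension.go and codec_messageset.go hunks above implement a wire-bytes pass-through for lazy extensions: while an extension is still unexpanded, its original encoded bytes are reused for both size calculation and marshaling instead of being decoded and re-encoded. The standalone Go sketch below illustrates that pattern under simplified, invented names (lazyField, marshalOpts, expand); it is a minimal illustration, not the vendored implementation.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyField holds a field either as raw wire bytes (not yet expanded)
// or as a decoded value. atomicOnce flips to 1 once expansion happens,
// mirroring ExtensionField.lazy.atomicOnce above. All names are invented.
type lazyField struct {
	mu         sync.Mutex
	atomicOnce uint32
	wire       []byte // original encoded bytes, valid while unexpanded
	decoded    string // stand-in for the decoded representation
}

func (f *lazyField) isUnexpandedLazy() bool {
	return atomic.LoadUint32(&f.atomicOnce) == 0
}

// lazyBuffer returns the raw wire bytes while the field is still
// unexpanded, else nil: check the atomic first, take the lock only if needed.
func (f *lazyField) lazyBuffer() []byte {
	if !f.isUnexpandedLazy() {
		return nil
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.wire
}

// expand decodes the field and marks it expanded.
func (f *lazyField) expand() {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.atomicOnce == 0 {
		f.decoded = fmt.Sprintf("decoded(%x)", f.wire)
		atomic.StoreUint32(&f.atomicOnce, 1)
	}
}

type marshalOpts struct{ deterministic bool }

// marshal appends the field's encoding to b. When deterministic output is
// not requested and the field is still lazy, the original bytes are spliced
// through verbatim, with no decode/re-encode round trip: the analogue of
// the fullyLazyExtensions fast path in this diff.
func marshal(b []byte, f *lazyField, opts marshalOpts) []byte {
	if !opts.deterministic {
		if lb := f.lazyBuffer(); lb != nil {
			return append(b, lb...)
		}
	}
	f.expand()
	return append(b, f.decoded...)
}

func main() {
	f := &lazyField{wire: []byte{0x0a, 0x03, 'f', 'o', 'o'}}
	fmt.Printf("lazy pass-through: %x\n", marshal(nil, f, marshalOpts{}))
	fmt.Printf("deterministic:     %s\n", marshal(nil, f, marshalOpts{deterministic: true}))
}

The real code additionally re-tags the buffered bytes for message sets (see marshalMessageSetField above) and, per the fullyLazyExtensions comment later in this diff, forces expansion when deterministic marshaling is requested so the output does not depend on whether the message was ever decoded.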
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516f9..18cb96fd70a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a0576d..304244a651d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e7e..febd2122472 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. 
+func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae1d..e31249f64f7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. 
// @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d0057e..81b2b1a763d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1fe7a..bf0b6049b46 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdcedd..019399d454d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). 
-type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbdade6..ecb4623d701 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. + return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd3552..99dc23c6f0a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434c7..da685e8a29d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e31164..5f20ca5d8ab 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. 
-func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5b7..a1f09162d05 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba508022..dbbf1f6862c 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a697..d248f292846 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca585424..f3cebab29c8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index c6293086750..6de31c2ebdb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d31178..ea154eec44d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe5204..cd8fadbaf8f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. 
- ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4e8..75f83a2af03 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 1603097311e..9fe83cef5a9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. 
// // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8e1..7f3583ead81 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 43547011173..f7d386990a0 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
 	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
 }
@@ -70,7 +70,7 @@ func valueOfString(v string) Value {
 func valueOfBytes(v []byte) Value {
 	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
 }
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
 	p := (*ifaceHeader)(unsafe.Pointer(&v))
 	return Value{typ: p.Type, ptr: p.Data}
 }
@@ -81,7 +81,7 @@ func (v Value) getString() string {
 func (v Value) getBytes() []byte {
 	return unsafe.Slice((*byte)(v.ptr), v.num)
 }
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
 	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
 	return x
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc52a67..de177733914 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -95,7 +95,7 @@ type Files struct {
 	// multiple files. Only top-level declarations are registered.
 	// Note that enum values are in the top-level since they are in the same
 	// scope as the parent enum.
-	descsByName map[protoreflect.FullName]interface{}
+	descsByName map[protoreflect.FullName]any
 	filesByPath map[string][]protoreflect.FileDescriptor
 	numFiles    int
 }
@@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
 		defer globalMutex.Unlock()
 	}
 	if r.descsByName == nil {
-		r.descsByName = map[protoreflect.FullName]interface{}{
+		r.descsByName = map[protoreflect.FullName]any{
 			"": &packageDescriptor{},
 		}
 		r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
@@ -485,7 +485,7 @@ type Types struct {
 }

 type (
-	typesByName         map[protoreflect.FullName]interface{}
+	typesByName         map[protoreflect.FullName]any
 	extensionsByMessage map[protoreflect.FullName]extensionsByNumber
 	extensionsByNumber  map[protoreflect.FieldNumber]protoreflect.ExtensionType
 )
@@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
 	return nil
 }

-func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
 	name := desc.FullName()
 	prev := r.typesByName[name]
 	if prev != nil {
@@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p
 		}
 	}
 }

-func typeName(t interface{}) string {
+func typeName(t any) string {
 	switch t.(type) {
 	case protoreflect.EnumType:
 		return "enum"
@@ -854,7 +854,7 @@
 }

-func amendErrorWithCaller(err error, prev, curr interface{}) error {
+func amendErrorWithCaller(err error, prev, curr any) error {
 	prevPkg := goPackage(prev)
 	currPkg := goPackage(curr)
 	if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
@@ -863,7 +863,7 @@
-func goPackage(v interface{}) string {
+func goPackage(v any) string {
 	switch d := v.(type) {
 	case protoreflect.EnumType:
 		v = d.Descriptor()
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 10c9030eb03..9403eb07507 100644
---
a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3012,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3075,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -4706,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4779,438 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 
0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 
0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 
0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 
0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 
0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 
0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 
0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 
0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, - 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, - 0x12, 0x08, 
0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, - 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, - 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, - 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, - 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, - 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, - 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 
0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, - 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, - 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, - 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, - 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, - 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, - 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, - 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 
0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 
0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 
0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, - 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 
0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 
0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 
0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 
0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5227,7 +5243,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5329,38 +5345,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> 
google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> 
google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5369,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5381,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5393,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5405,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { 
switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5419,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5431,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5443,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5455,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5467,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5479,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5491,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5505,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5519,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5533,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5547,7 +5564,7 @@ 
func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5561,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5575,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5589,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5603,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5615,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5629,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5641,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5653,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5665,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5677,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5689,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5701,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5713,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5725,7 +5742,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state @@ -5737,7 +5754,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5749,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5761,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5773,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index b0df3fb3340..a2ca940c50f 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,27 +90,27 @@ var 
file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, - 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, - 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, - 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, - 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, - 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, - 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x70, 0x62, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 
0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -126,7 +126,7 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { } var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []interface{}{ +var file_google_protobuf_go_features_proto_goTypes = []any{ (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } @@ -146,7 +146,7 @@ func file_google_protobuf_go_features_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be5403..7172b43d383 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd4c..1b71bcd910a 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := 
v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba394..d87b4fb8281 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb331e..ac1e91bb6dd 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b88ea..d45361cbc72 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. 
// @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. -func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. 
-func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. -func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a3363e..83a5a645b08 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git 
a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a87130f8..e473f826aa3 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - 
file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/modules.txt b/vendor/modules.txt index acd2a6da7d1..f6bfdf7dc17 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,11 +58,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -474,8 +474,8 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/btree v1.0.1 -## explicit; go 1.12 +# github.com/google/btree v1.1.2 +## explicit; go 1.18 github.com/google/btree # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 @@ -552,7 +552,7 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-hclog v1.5.0 +# github.com/hashicorp/go-hclog v1.6.3 ## explicit; go 1.13 github.com/hashicorp/go-hclog # github.com/hashicorp/go-immutable-radix v1.3.1 @@ -620,7 +620,7 @@ github.com/klauspost/compress/snappy github.com/klauspost/compress/zlib github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.6 +# github.com/klauspost/cpuid/v2 v2.2.7 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 # github.com/kylelemons/godebug v1.1.0 @@ -661,7 +661,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.70 +# github.com/minio/minio-go/v7 v7.0.72 ## explicit; go 1.21 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/credentials @@ -802,11 +802,11 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.54.0 => github.com/prometheus/common v0.53.0 +# github.com/prometheus/common v0.54.1-0.20240615204547-04635d2962f9 ## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/helpers/templates github.com/prometheus/common/model github.com/prometheus/common/route github.com/prometheus/common/version @@ -821,7 +821,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.52.2-0.20240606174736-edd558884b24 +# github.com/prometheus/prometheus v0.53.1-0.20240625160125-1abeebacb870 ## explicit; go 1.21 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -928,8 +928,8 @@ github.com/stretchr/objx 
github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 -## explicit; go 1.20 +# github.com/thanos-io/objstore v0.0.0-20240613135658-39f40b8d97f7 +## explicit; go 1.21 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp github.com/thanos-io/objstore/providers/azure @@ -961,7 +961,7 @@ github.com/thanos-io/promql-engine/query github.com/thanos-io/promql-engine/ringbuffer github.com/thanos-io/promql-engine/storage github.com/thanos-io/promql-engine/storage/prometheus -# github.com/thanos-io/thanos v0.35.2-0.20240613160422-651a4a440e8c +# github.com/thanos-io/thanos v0.35.2-0.20240617212227-065e3dd75aac ## explicit; go 1.21 github.com/thanos-io/thanos/pkg/api/query/querypb github.com/thanos-io/thanos/pkg/block @@ -1154,8 +1154,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvut # go.opentelemetry.io/contrib/propagators/autoprop v0.38.0 ## explicit; go 1.18 go.opentelemetry.io/contrib/propagators/autoprop -# go.opentelemetry.io/contrib/propagators/aws v1.22.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/propagators/aws v1.27.0 +## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/aws/xray # go.opentelemetry.io/contrib/propagators/b3 v1.13.0 ## explicit; go 1.18 @@ -1183,7 +1183,7 @@ go.opentelemetry.io/otel/semconv/v1.17.0/httpconv go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.25.0 -# go.opentelemetry.io/otel/bridge/opentracing v1.26.0 +# go.opentelemetry.io/otel/bridge/opentracing v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration @@ -1413,6 +1413,7 @@ google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental google.golang.org/grpc/grpclog google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -1454,8 +1455,8 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext diff --git a/website/package-lock.json b/website/package-lock.json index 5d285ac0c6c..6f7ed4c35e5 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -116,12 +116,23 @@ "dev": true }, "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "requires": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" + }, + "dependencies": { + "fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + } } }, "browserslist": { 
@@ -357,15 +368,6 @@ "reusify": "^1.0.4" } }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, "find-up": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
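Note on the protobuf hunks above: the regeneration against google.golang.org/protobuf v1.34.2 (see the modules.txt bump from v1.34.1) makes two purely mechanical kinds of change. First, the raw descriptor for descriptor.proto gains the new google.protobuf.EnumValueOptions.feature_support field; that single extra entry at index 49 is why every subsequent depIdxs type_name index shifts up by one and the sub-list boundaries move from 76 to 77. Second, every generated interface{} is respelled as any. Since Go 1.18, any is a predeclared alias for interface{} (they are the identical type), so the rename cannot change behavior; it only modernizes the generated output, and the module's minimum Go version moves from 1.17 to 1.20 accordingly.

A minimal sketch of the structpb API whose documentation changes above, assuming only the vendored google.golang.org/protobuf/types/known/structpb package; the map contents, main wrapper, and printed output are illustrative, not part of this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Because any aliases interface{}, a map[string]any literal satisfies
	// NewValue no matter which spelling the generated signature uses.
	v, err := structpb.NewValue(map[string]any{
		"name":  "cortex",
		"ready": true,
	})
	if err != nil {
		panic(err)
	}
	// AsInterface round-trips the Value to plain Go data; a struct-kind
	// Value comes back as map[string]any via Struct.AsMap.
	m, _ := v.AsInterface().(map[string]any)
	fmt.Println(m["name"], m["ready"]) // cortex true
}

Callers that still spell these types as map[string]interface{} keep compiling unchanged, which is what makes this part of the vendor bump safe to take without touching Cortex code.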