From ce229a13182dc657db89c2eb82f2b9511861763f Mon Sep 17 00:00:00 2001 From: Marc Lopez Rubio Date: Thu, 8 Aug 2024 11:39:28 +0800 Subject: [PATCH] apm-aggregation: Remove FromVTPool() and ReturnToVTPool() calls --- copy/apm-aggregation/.github/dependabot.yml | 39 + .../.github/workflows/add-to-project.yaml | 17 + copy/apm-aggregation/.github/workflows/ci.yml | 35 + copy/apm-aggregation/.gitignore | 29 + copy/apm-aggregation/CODEOWNERS | 1 + copy/apm-aggregation/CODE_OF_CONDUCT.md | 3 + copy/apm-aggregation/LICENSE.txt | 93 + copy/apm-aggregation/Makefile | 55 + copy/apm-aggregation/NOTICE.txt | 2 + copy/apm-aggregation/README.md | 202 + .../aggregationpb/aggregation.pb.go | 1738 +++++ .../aggregationpb/aggregation_vtproto.pb.go | 5590 +++++++++++++++++ copy/apm-aggregation/aggregationpb/doc.go | 7 + .../aggregationpb/labels.pb.go | 325 + .../aggregationpb/labels_vtproto.pb.go | 839 +++ .../apm-aggregation/aggregators/aggregator.go | 777 +++ .../aggregators/aggregator_test.go | 1813 ++++++ .../aggregators/cachedeventsmap.go | 77 + copy/apm-aggregation/aggregators/codec.go | 480 ++ .../apm-aggregation/aggregators/codec_test.go | 136 + .../aggregators/combined_metrics_test.go | 410 ++ copy/apm-aggregation/aggregators/config.go | 279 + .../aggregators/config_test.go | 198 + copy/apm-aggregation/aggregators/converter.go | 1134 ++++ .../aggregators/converter_test.go | 885 +++ .../internal/constraint/constraint.go | 34 + .../internal/hdrhistogram/hdrhistogram.go | 393 ++ .../hdrhistogram/hdrhistogram_test.go | 96 + .../aggregators/internal/protohash/doc.go | 8 + .../internal/protohash/generate/main.go | 119 + .../internal/protohash/generated.go | 86 + .../aggregators/internal/telemetry/config.go | 43 + .../internal/telemetry/config_test.go | 47 + .../aggregators/internal/telemetry/metrics.go | 301 + .../internal/telemetry/metrics_test.go | 193 + copy/apm-aggregation/aggregators/logging.go | 23 + copy/apm-aggregation/aggregators/merger.go | 450 ++ .../aggregators/merger_test.go | 935 +++ copy/apm-aggregation/aggregators/models.go | 299 + .../aggregators/ndjson_bench_test.go | 127 + .../aggregators/nullable/bool.go | 46 + .../aggregators/nullable/doc.go | 6 + .../aggregators/testdata/.gitkeep | 0 copy/apm-aggregation/go.mod | 76 + copy/apm-aggregation/go.sum | 640 ++ copy/apm-aggregation/proto/aggregation.proto | 137 + copy/apm-aggregation/proto/buf.yaml | 9 + copy/apm-aggregation/proto/labels.proto | 23 + .../testdata/sdh_apm_1442_span.json | 1462 +++++ .../testdata/sdh_apm_1442_transaction.json | 1652 +++++ copy/apm-aggregation/tools/go.mod | 18 + copy/apm-aggregation/tools/go.sum | 23 + copy/apm-aggregation/tools/install-protoc.sh | 39 + copy/apm-aggregation/tools/tools.go | 16 + go.mod | 2 + 55 files changed, 22467 insertions(+) create mode 100644 copy/apm-aggregation/.github/dependabot.yml create mode 100644 copy/apm-aggregation/.github/workflows/add-to-project.yaml create mode 100644 copy/apm-aggregation/.github/workflows/ci.yml create mode 100644 copy/apm-aggregation/.gitignore create mode 100644 copy/apm-aggregation/CODEOWNERS create mode 100644 copy/apm-aggregation/CODE_OF_CONDUCT.md create mode 100644 copy/apm-aggregation/LICENSE.txt create mode 100644 copy/apm-aggregation/Makefile create mode 100644 copy/apm-aggregation/NOTICE.txt create mode 100644 copy/apm-aggregation/README.md create mode 100644 copy/apm-aggregation/aggregationpb/aggregation.pb.go create mode 100644 copy/apm-aggregation/aggregationpb/aggregation_vtproto.pb.go create mode 100644 
copy/apm-aggregation/aggregationpb/doc.go create mode 100644 copy/apm-aggregation/aggregationpb/labels.pb.go create mode 100644 copy/apm-aggregation/aggregationpb/labels_vtproto.pb.go create mode 100644 copy/apm-aggregation/aggregators/aggregator.go create mode 100644 copy/apm-aggregation/aggregators/aggregator_test.go create mode 100644 copy/apm-aggregation/aggregators/cachedeventsmap.go create mode 100644 copy/apm-aggregation/aggregators/codec.go create mode 100644 copy/apm-aggregation/aggregators/codec_test.go create mode 100644 copy/apm-aggregation/aggregators/combined_metrics_test.go create mode 100644 copy/apm-aggregation/aggregators/config.go create mode 100644 copy/apm-aggregation/aggregators/config_test.go create mode 100644 copy/apm-aggregation/aggregators/converter.go create mode 100644 copy/apm-aggregation/aggregators/converter_test.go create mode 100644 copy/apm-aggregation/aggregators/internal/constraint/constraint.go create mode 100644 copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram.go create mode 100644 copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram_test.go create mode 100644 copy/apm-aggregation/aggregators/internal/protohash/doc.go create mode 100644 copy/apm-aggregation/aggregators/internal/protohash/generate/main.go create mode 100644 copy/apm-aggregation/aggregators/internal/protohash/generated.go create mode 100644 copy/apm-aggregation/aggregators/internal/telemetry/config.go create mode 100644 copy/apm-aggregation/aggregators/internal/telemetry/config_test.go create mode 100644 copy/apm-aggregation/aggregators/internal/telemetry/metrics.go create mode 100644 copy/apm-aggregation/aggregators/internal/telemetry/metrics_test.go create mode 100644 copy/apm-aggregation/aggregators/logging.go create mode 100644 copy/apm-aggregation/aggregators/merger.go create mode 100644 copy/apm-aggregation/aggregators/merger_test.go create mode 100644 copy/apm-aggregation/aggregators/models.go create mode 100644 copy/apm-aggregation/aggregators/ndjson_bench_test.go create mode 100644 copy/apm-aggregation/aggregators/nullable/bool.go create mode 100644 copy/apm-aggregation/aggregators/nullable/doc.go create mode 100644 copy/apm-aggregation/aggregators/testdata/.gitkeep create mode 100644 copy/apm-aggregation/go.mod create mode 100644 copy/apm-aggregation/go.sum create mode 100644 copy/apm-aggregation/proto/aggregation.proto create mode 100644 copy/apm-aggregation/proto/buf.yaml create mode 100644 copy/apm-aggregation/proto/labels.proto create mode 100644 copy/apm-aggregation/testdata/sdh_apm_1442_span.json create mode 100644 copy/apm-aggregation/testdata/sdh_apm_1442_transaction.json create mode 100644 copy/apm-aggregation/tools/go.mod create mode 100644 copy/apm-aggregation/tools/go.sum create mode 100755 copy/apm-aggregation/tools/install-protoc.sh create mode 100644 copy/apm-aggregation/tools/tools.go diff --git a/copy/apm-aggregation/.github/dependabot.yml b/copy/apm-aggregation/.github/dependabot.yml new file mode 100644 index 00000000000..b7615ccd762 --- /dev/null +++ b/copy/apm-aggregation/.github/dependabot.yml @@ -0,0 +1,39 @@ +version: 2 +updates: + # Go dependencies + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" + groups: + otel: + patterns: + - "go.opentelemetry.io/*" + golang.org/x/: + patterns: + - "golang.org/x/*" + go-agent: + patterns: + - "go.elastic.co/apm*" + # go tools + - package-ecosystem: "gomod" + directory: "tools/" + schedule: + interval: "weekly" + groups: + dependencies: + patterns: + - 
"*" + # GitHub actions + - package-ecosystem: "github-actions" + directory: "/" + reviewers: + - "elastic/observablt-ci" + schedule: + interval: "weekly" + day: "sunday" + time: "22:00" + groups: + github-actions: + patterns: + - "*" diff --git a/copy/apm-aggregation/.github/workflows/add-to-project.yaml b/copy/apm-aggregation/.github/workflows/add-to-project.yaml new file mode 100644 index 00000000000..1d8c60bf74e --- /dev/null +++ b/copy/apm-aggregation/.github/workflows/add-to-project.yaml @@ -0,0 +1,17 @@ +name: Add new issues to the project board + +on: + issues: + types: + - opened + - transferred + +jobs: + add-to-project: + name: Add issue to project + runs-on: ubuntu-latest + steps: + - uses: actions/add-to-project@v1.0.2 + with: + project-url: https://github.com/orgs/elastic/projects/1286 + github-token: ${{ secrets.APM_TECH_USER_TOKEN }} \ No newline at end of file diff --git a/copy/apm-aggregation/.github/workflows/ci.yml b/copy/apm-aggregation/.github/workflows/ci.yml new file mode 100644 index 00000000000..56b2fd2221e --- /dev/null +++ b/copy/apm-aggregation/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: ci +on: ["push", "pull_request"] + +permissions: + contents: read + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: make lint + - run: make fmt + - name: Verify repo is up-to-date + run: | + if [ -n "$(git status --porcelain)" ]; then + echo 'Updates required:' + git status + exit 1 + fi + + run-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - name: Run tests + run: make test diff --git a/copy/apm-aggregation/.gitignore b/copy/apm-aggregation/.gitignore new file mode 100644 index 00000000000..7bfde17709d --- /dev/null +++ b/copy/apm-aggregation/.gitignore @@ -0,0 +1,29 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* +__debug_bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.sw +*.iml +*~ +*.DS_Store + +build + +# git ignore ndjson testdata files to avoid duplication across repos +# until we figure out a better way to do so +/aggregators/testdata/*.ndjson diff --git a/copy/apm-aggregation/CODEOWNERS b/copy/apm-aggregation/CODEOWNERS new file mode 100644 index 00000000000..d24d850f19d --- /dev/null +++ b/copy/apm-aggregation/CODEOWNERS @@ -0,0 +1 @@ +* @elastic/obs-ds-intake-services diff --git a/copy/apm-aggregation/CODE_OF_CONDUCT.md b/copy/apm-aggregation/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..c286a3152c4 --- /dev/null +++ b/copy/apm-aggregation/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +303 See Other + +Location: https://www.elastic.co/community/codeofconduct diff --git a/copy/apm-aggregation/LICENSE.txt b/copy/apm-aggregation/LICENSE.txt new file mode 100644 index 00000000000..92503a72178 --- /dev/null +++ b/copy/apm-aggregation/LICENSE.txt @@ -0,0 +1,93 @@ +Elastic License 2.0 + +URL: https://www.elastic.co/licensing/elastic-license + +## Acceptance + +By using the software, you agree to all of the terms and conditions below. 
+ +## Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, +non-sublicensable, non-transferable license to use, copy, distribute, make +available, and prepare derivative works of the software, in each case subject to +the limitations and conditions below. + +## Limitations + +You may not provide the software to third parties as a hosted or managed +service, where the service provides users with access to any substantial set of +the features or functionality of the software. + +You may not move, change, disable, or circumvent the license key functionality +in the software, and you may not remove or obscure any functionality in the +software that is protected by the license key. + +You may not alter, remove, or obscure any licensing, copyright, or other notices +of the licensor in the software. Any use of the licensor’s trademarks is subject +to applicable law. + +## Patents + +The licensor grants you a license, under any patent claims the licensor can +license, or becomes able to license, to make, have made, use, sell, offer for +sale, import and have imported the software, in each case subject to the +limitations and conditions in this license. This license does not cover any +patent claims that you cause to be infringed by modifications or additions to +the software. If you or your company make any written claim that the software +infringes or contributes to infringement of any patent, your patent license for +the software granted under these terms ends immediately. If your company makes +such a claim, your patent license ends immediately for work on behalf of your +company. + +## Notices + +You must ensure that anyone who gets a copy of any part of the software from you +also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the +software prominent notices stating that you have modified the software. + +## No Other Rights + +These terms do not imply any licenses other than those expressly granted in +these terms. + +## Termination + +If you use the software in violation of these terms, such use is not licensed, +and your licenses will automatically terminate. If the licensor provides you +with a notice of your violation, and you cease all violation of this license no +later than 30 days after you receive that notice, your licenses will be +reinstated retroactively. However, if you violate these terms after such +reinstatement, any additional violation of these terms will cause your licenses +to terminate automatically and permanently. + +## No Liability + +*As far as the law allows, the software comes as is, without any warranty or +condition, and the licensor will not be liable to you for any damages arising +out of these terms or the use or nature of the software, under any kind of +legal claim.* + +## Definitions + +The **licensor** is the entity offering these terms, and the **software** is the +software the licensor makes available under these terms, including any portion +of it. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of +organization that you work for, plus all organizations that have control over, +are under the control of, or are under common control with that +organization. **control** means ownership of substantially all the assets of an +entity, or the power to direct its management and policies by vote, contract, or +otherwise. Control can be direct or indirect. 
+ +**your licenses** are all the licenses granted to you for the software under +these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. \ No newline at end of file diff --git a/copy/apm-aggregation/Makefile b/copy/apm-aggregation/Makefile new file mode 100644 index 00000000000..a784c793b65 --- /dev/null +++ b/copy/apm-aggregation/Makefile @@ -0,0 +1,55 @@ +.DEFAULT_GOAL := all +all: test + +fmt: tools/go.mod + @go run -modfile=tools/go.mod github.com/elastic/go-licenser -license=Elasticv2 . + @go run -modfile=tools/go.mod golang.org/x/tools/cmd/goimports -local github.com/elastic/ -w . + +lint: tools/go.mod + for dir in $(shell find . -type f -name go.mod -exec dirname '{}' \;); do (cd $$dir && go mod tidy && git diff --stat --exit-code -- go.mod go.sum) || exit $$?; done + go run -modfile=tools/go.mod honnef.co/go/tools/cmd/staticcheck -checks=all ./... + +protolint: + docker run --volume "$(PWD):/workspace" --workdir /workspace bufbuild/buf lint proto + docker run --volume "$(PWD):/workspace" --workdir /workspace bufbuild/buf breaking proto --against https://github.com/elastic/apm-aggregation.git#branch=main,subdir=proto + +.PHONY: clean +clean: + rm -fr bin build + +.PHONY: test +test: go.mod + go test -v -race ./... + +############################################################################## +# Protobuf generation +############################################################################## + +GITROOT ?= $(shell git rev-parse --show-toplevel) +GOOSBUILD:=$(GITROOT)/build/$(shell go env GOOS) +PROTOC=$(GOOSBUILD)/protoc/bin/protoc +PROTOC_GEN_GO_VTPROTO=$(GOOSBUILD)/protoc-gen-go-vtproto +PROTOC_GEN_GO=$(GOOSBUILD)/protoc-gen-go + +$(PROTOC): + @./tools/install-protoc.sh + +$(PROTOC_GEN_GO_VTPROTO): $(GITROOT)/tools/go.mod + go build -o $@ -modfile=$< github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto + +$(PROTOC_GEN_GO): $(GITROOT)/tools/go.mod + go build -o $@ -modfile=$< google.golang.org/protobuf/cmd/protoc-gen-go + +PROTOC_OUT?=. + +.PHONY: gen-proto +gen-proto: $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_VTPROTO) $(PROTOC) + $(eval STRUCTS := $(shell grep '^message' proto/*.proto | cut -d ' ' -f2)) + $(eval PROTOC_VT_STRUCTS := $(shell for s in $(STRUCTS); do echo --go-vtproto_opt=pool=./aggregationpb.$$s ;done)) + $(PROTOC) -I . --go_out=$(PROTOC_OUT) --plugin protoc-gen-go="$(PROTOC_GEN_GO)" \ + --go-vtproto_out=$(PROTOC_OUT) --plugin protoc-gen-go-vtproto="$(PROTOC_GEN_GO_VTPROTO)" \ + --go-vtproto_opt=features=marshal+unmarshal+size+pool+clone \ + $(PROTOC_VT_STRUCTS) \ + $(wildcard proto/*.proto) + go generate ./aggregators/internal/protohash + $(MAKE) fmt diff --git a/copy/apm-aggregation/NOTICE.txt b/copy/apm-aggregation/NOTICE.txt new file mode 100644 index 00000000000..26762c7733a --- /dev/null +++ b/copy/apm-aggregation/NOTICE.txt @@ -0,0 +1,2 @@ +Elastic APM Aggregation +Copyright 2023-2023 Elasticsearch B.V. diff --git a/copy/apm-aggregation/README.md b/copy/apm-aggregation/README.md new file mode 100644 index 00000000000..1c086ab4e02 --- /dev/null +++ b/copy/apm-aggregation/README.md @@ -0,0 +1,202 @@ +# apm-aggregation + +APM metrics aggregation library that implements an LSM (Log Structured Merge tree)-based metrics aggregator. + +Files are subject to Elastic License v2. See LICENSE.txt for more. + +## Instrumentation + +`apm-aggregation` uses OTEL to instrument itself. 
Instrumentation produces a set +of metrics to help monitor the status of aggregations. This section describes the +metrics produced by `apm-aggregation` in detail. + +### Instrumentation Areas + +`apm-aggregation` aggregates metrics using the LSM-based key-value store [pebble](https://github.com/cockroachdb/pebble). +The instrumentation covers two broad areas: + +1. The core aggregation logic, including ingestion and harvest. +2. Performance of the pebble database. + +### Metrics + +`apm-aggregation` records and publishes the following metrics (a sketch of how to collect them follows the list): + +#### `events.processed.count` + +- Type: `Float64Counter` + +The number of processed APM events. It includes both successfully and unsuccessfully +processed events, distinguished by the [`outcome`](#outcome) dimension. + +##### Dimensions + +- [`combined_metrics_id`](#combined_metrics_id) +- [`aggregation_interval`](#aggregation_interval) +- [`outcome`](#outcome) + +#### `events.processed.bytes` + +- Type: `Int64Counter` + +The number of encoded bytes processed by the aggregator. This reports the same number +of bytes that are written to the underlying db. + +##### Dimensions + +- [`combined_metrics_id`](#combined_metrics_id) +- [`outcome`](#outcome) + +#### `events.processed.latency` + +- Type: `Float64Histogram` + +The processing delay for a batch of APM events accepted at a specific processing +time. It is recorded after removing any expected delays due to the aggregation interval +or configuration. + +##### Dimensions + +- [`combined_metrics_id`](#combined_metrics_id) +- [`aggregation_interval`](#aggregation_interval) +- [`outcome`](#outcome) + +#### `events.processed.queued-latency` + +- Type: `Float64Histogram` + +The delay in processing a batch, based on the youngest APM event received in the batch. + +##### Dimensions + +- [`combined_metrics_id`](#combined_metrics_id) +- [`aggregation_interval`](#aggregation_interval) +- [`outcome`](#outcome) + +#### `metrics.overflowed.count` + +- Type: `Int64Counter` + +The estimated number of metric aggregation keys that resulted in an overflow, per interval and aggregation type. + +##### Dimensions + +- [`combined_metrics_id`](#combined_metrics_id) +- [`aggregation_interval`](#aggregation_interval) +- [`aggregation_type`](#aggregation_type) + +#### `pebble.flushes` + +- Type: `Int64ObservableCounter` + +The number of memtable flushes to disk. + +#### `pebble.flushed-bytes` + +- Type: `Int64ObservableCounter` + +The number of bytes written during a flush. + +#### `pebble.compactions` + +- Type: `Int64ObservableCounter` + +The number of table compactions performed by pebble. + +#### `pebble.ingested-bytes` + +- Type: `Int64ObservableCounter` + +The number of bytes ingested by pebble. + +#### `pebble.compacted-bytes-read` + +- Type: `Int64ObservableCounter` + +The number of bytes read during compaction. + +#### `pebble.compacted-bytes-written` + +- Type: `Int64ObservableCounter` + +The number of bytes written during compaction. + +#### `pebble.memtable.total-size` + +- Type: `Int64ObservableGauge` + +The current size of the memtable in bytes. + +#### `pebble.disk.usage` + +- Type: `Int64ObservableGauge` + +The current total disk usage by pebble in bytes, including live and obsolete files. + +#### `pebble.read-amplification` + +- Type: `Int64ObservableGauge` + +The current read amplification for the db. + +#### `pebble.num-sstables` + +- Type: `Int64ObservableGauge` + +The current number of SSTables. + +#### `pebble.table-readers-mem-estimate` + +- Type: `Int64ObservableGauge` + +The memory in bytes used by pebble for index and filter blocks. + +#### `pebble.estimated-pending-compaction` + +- Type: `Int64ObservableGauge` + +The current estimated number of bytes pending compaction. + +#### `pebble.marked-for-compaction-files` + +- Type: `Int64ObservableGauge` + +The current number of SSTables marked for compaction. + +#### `pebble.keys.tombstone.count` + +- Type: `Int64ObservableGauge` + +The approximate count of delete keys across the storage engine.
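+
+The metrics above are published through the configured OpenTelemetry meter, so a
+host application can collect them with any OTel Go SDK reader. The sketch below
+is illustrative only: it uses a `ManualReader` to pull metrics on demand, and
+the aggregator wiring (commented out) uses a hypothetical option name, since the
+exact `aggregators` option depends on the library version.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+func main() {
+	// A ManualReader lets the host application pull the instrumentation
+	// metrics on demand instead of exporting them on a fixed interval.
+	reader := sdkmetric.NewManualReader()
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	defer provider.Shutdown(context.Background())
+
+	// Hypothetical wiring: hand the provider (or a meter derived from it)
+	// to the aggregator; the real option name may differ.
+	// agg, _ := aggregators.New(aggregators.WithMeter(provider.Meter("apm-aggregation")))
+
+	// Collect whatever has been recorded so far and print the metric names,
+	// e.g. events.processed.count, pebble.flushes, pebble.disk.usage.
+	var rm metricdata.ResourceMetrics
+	if err := reader.Collect(context.Background(), &rm); err != nil {
+		panic(err)
+	}
+	for _, sm := range rm.ScopeMetrics {
+		for _, m := range sm.Metrics {
+			fmt.Println(m.Name)
+		}
+	}
+}
+```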
+ +### Dimensions + +This section lists the general dimensions published by some of the metrics. + +#### `combined_metrics_id` + +This is an optional dimension. The key and the value of this dimension depend +on the option `WithCombinedMetricsIDToKVs` passed to the aggregator. If this +option is not supplied, this dimension is omitted. + +#### `aggregation_interval` + +Holds the aggregation interval for which the combined metrics are produced. +For example: `1m`, `10m`, etc. + +#### `aggregation_type` + +Holds the aggregation type for which an overflow occurred. +For example: `service`, `transaction`, `service_transaction`, `service_destination`. + +#### `outcome` + +##### `success` + +Events that have been successfully aggregated into the final combined metrics and +processed as part of the harvest. + +##### `failure` + +Events that failed to be aggregated for any reason and were dropped at any stage +in the aggregation process. diff --git a/copy/apm-aggregation/aggregationpb/aggregation.pb.go b/copy/apm-aggregation/aggregationpb/aggregation.pb.go new file mode 100644 index 00000000000..427254dfc42 --- /dev/null +++ b/copy/apm-aggregation/aggregationpb/aggregation.pb.go @@ -0,0 +1,1738 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.22.1 +// source: proto/aggregation.proto + +package aggregationpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. +
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CombinedMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceMetrics []*KeyedServiceMetrics `protobuf:"bytes,1,rep,name=service_metrics,json=serviceMetrics,proto3" json:"service_metrics,omitempty"` + OverflowServices *Overflow `protobuf:"bytes,2,opt,name=overflow_services,json=overflowServices,proto3" json:"overflow_services,omitempty"` + OverflowServicesEstimator []byte `protobuf:"bytes,3,opt,name=overflow_services_estimator,json=overflowServicesEstimator,proto3" json:"overflow_services_estimator,omitempty"` + EventsTotal float64 `protobuf:"fixed64,4,opt,name=events_total,json=eventsTotal,proto3" json:"events_total,omitempty"` + YoungestEventTimestamp uint64 `protobuf:"varint,5,opt,name=youngest_event_timestamp,json=youngestEventTimestamp,proto3" json:"youngest_event_timestamp,omitempty"` +} + +func (x *CombinedMetrics) Reset() { + *x = CombinedMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CombinedMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CombinedMetrics) ProtoMessage() {} + +func (x *CombinedMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CombinedMetrics.ProtoReflect.Descriptor instead. +func (*CombinedMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{0} +} + +func (x *CombinedMetrics) GetServiceMetrics() []*KeyedServiceMetrics { + if x != nil { + return x.ServiceMetrics + } + return nil +} + +func (x *CombinedMetrics) GetOverflowServices() *Overflow { + if x != nil { + return x.OverflowServices + } + return nil +} + +func (x *CombinedMetrics) GetOverflowServicesEstimator() []byte { + if x != nil { + return x.OverflowServicesEstimator + } + return nil +} + +func (x *CombinedMetrics) GetEventsTotal() float64 { + if x != nil { + return x.EventsTotal + } + return 0 +} + +func (x *CombinedMetrics) GetYoungestEventTimestamp() uint64 { + if x != nil { + return x.YoungestEventTimestamp + } + return 0 +} + +type KeyedServiceMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *ServiceAggregationKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Metrics *ServiceMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *KeyedServiceMetrics) Reset() { + *x = KeyedServiceMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyedServiceMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyedServiceMetrics) ProtoMessage() {} + +func (x *KeyedServiceMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use KeyedServiceMetrics.ProtoReflect.Descriptor instead. +func (*KeyedServiceMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{1} +} + +func (x *KeyedServiceMetrics) GetKey() *ServiceAggregationKey { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyedServiceMetrics) GetMetrics() *ServiceMetrics { + if x != nil { + return x.Metrics + } + return nil +} + +type ServiceAggregationKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + ServiceEnvironment string `protobuf:"bytes,3,opt,name=service_environment,json=serviceEnvironment,proto3" json:"service_environment,omitempty"` + ServiceLanguageName string `protobuf:"bytes,4,opt,name=service_language_name,json=serviceLanguageName,proto3" json:"service_language_name,omitempty"` + AgentName string `protobuf:"bytes,5,opt,name=agent_name,json=agentName,proto3" json:"agent_name,omitempty"` + GlobalLabelsStr []byte `protobuf:"bytes,6,opt,name=global_labels_str,json=globalLabelsStr,proto3" json:"global_labels_str,omitempty"` +} + +func (x *ServiceAggregationKey) Reset() { + *x = ServiceAggregationKey{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceAggregationKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceAggregationKey) ProtoMessage() {} + +func (x *ServiceAggregationKey) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceAggregationKey.ProtoReflect.Descriptor instead. 
+func (*ServiceAggregationKey) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{2} +} + +func (x *ServiceAggregationKey) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *ServiceAggregationKey) GetServiceName() string { + if x != nil { + return x.ServiceName + } + return "" +} + +func (x *ServiceAggregationKey) GetServiceEnvironment() string { + if x != nil { + return x.ServiceEnvironment + } + return "" +} + +func (x *ServiceAggregationKey) GetServiceLanguageName() string { + if x != nil { + return x.ServiceLanguageName + } + return "" +} + +func (x *ServiceAggregationKey) GetAgentName() string { + if x != nil { + return x.AgentName + } + return "" +} + +func (x *ServiceAggregationKey) GetGlobalLabelsStr() []byte { + if x != nil { + return x.GlobalLabelsStr + } + return nil +} + +type ServiceMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverflowGroups *Overflow `protobuf:"bytes,1,opt,name=overflow_groups,json=overflowGroups,proto3" json:"overflow_groups,omitempty"` + TransactionMetrics []*KeyedTransactionMetrics `protobuf:"bytes,2,rep,name=transaction_metrics,json=transactionMetrics,proto3" json:"transaction_metrics,omitempty"` + ServiceTransactionMetrics []*KeyedServiceTransactionMetrics `protobuf:"bytes,3,rep,name=service_transaction_metrics,json=serviceTransactionMetrics,proto3" json:"service_transaction_metrics,omitempty"` + SpanMetrics []*KeyedSpanMetrics `protobuf:"bytes,4,rep,name=span_metrics,json=spanMetrics,proto3" json:"span_metrics,omitempty"` +} + +func (x *ServiceMetrics) Reset() { + *x = ServiceMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceMetrics) ProtoMessage() {} + +func (x *ServiceMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceMetrics.ProtoReflect.Descriptor instead. 
+func (*ServiceMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{3} +} + +func (x *ServiceMetrics) GetOverflowGroups() *Overflow { + if x != nil { + return x.OverflowGroups + } + return nil +} + +func (x *ServiceMetrics) GetTransactionMetrics() []*KeyedTransactionMetrics { + if x != nil { + return x.TransactionMetrics + } + return nil +} + +func (x *ServiceMetrics) GetServiceTransactionMetrics() []*KeyedServiceTransactionMetrics { + if x != nil { + return x.ServiceTransactionMetrics + } + return nil +} + +func (x *ServiceMetrics) GetSpanMetrics() []*KeyedSpanMetrics { + if x != nil { + return x.SpanMetrics + } + return nil +} + +type KeyedTransactionMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *TransactionAggregationKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Metrics *TransactionMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *KeyedTransactionMetrics) Reset() { + *x = KeyedTransactionMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyedTransactionMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyedTransactionMetrics) ProtoMessage() {} + +func (x *KeyedTransactionMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyedTransactionMetrics.ProtoReflect.Descriptor instead. 
+func (*KeyedTransactionMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{4} +} + +func (x *KeyedTransactionMetrics) GetKey() *TransactionAggregationKey { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyedTransactionMetrics) GetMetrics() *TransactionMetrics { + if x != nil { + return x.Metrics + } + return nil +} + +type TransactionAggregationKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TraceRoot bool `protobuf:"varint,1,opt,name=trace_root,json=traceRoot,proto3" json:"trace_root,omitempty"` + ContainerId string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + KubernetesPodName string `protobuf:"bytes,3,opt,name=kubernetes_pod_name,json=kubernetesPodName,proto3" json:"kubernetes_pod_name,omitempty"` + ServiceVersion string `protobuf:"bytes,4,opt,name=service_version,json=serviceVersion,proto3" json:"service_version,omitempty"` + ServiceNodeName string `protobuf:"bytes,5,opt,name=service_node_name,json=serviceNodeName,proto3" json:"service_node_name,omitempty"` + ServiceRuntimeName string `protobuf:"bytes,6,opt,name=service_runtime_name,json=serviceRuntimeName,proto3" json:"service_runtime_name,omitempty"` + ServiceRuntimeVersion string `protobuf:"bytes,7,opt,name=service_runtime_version,json=serviceRuntimeVersion,proto3" json:"service_runtime_version,omitempty"` + ServiceLanguageVersion string `protobuf:"bytes,8,opt,name=service_language_version,json=serviceLanguageVersion,proto3" json:"service_language_version,omitempty"` + HostHostname string `protobuf:"bytes,9,opt,name=host_hostname,json=hostHostname,proto3" json:"host_hostname,omitempty"` + HostName string `protobuf:"bytes,10,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + HostOsPlatform string `protobuf:"bytes,11,opt,name=host_os_platform,json=hostOsPlatform,proto3" json:"host_os_platform,omitempty"` + EventOutcome string `protobuf:"bytes,12,opt,name=event_outcome,json=eventOutcome,proto3" json:"event_outcome,omitempty"` + TransactionName string `protobuf:"bytes,13,opt,name=transaction_name,json=transactionName,proto3" json:"transaction_name,omitempty"` + TransactionType string `protobuf:"bytes,14,opt,name=transaction_type,json=transactionType,proto3" json:"transaction_type,omitempty"` + TransactionResult string `protobuf:"bytes,15,opt,name=transaction_result,json=transactionResult,proto3" json:"transaction_result,omitempty"` + FaasColdstart uint32 `protobuf:"varint,16,opt,name=faas_coldstart,json=faasColdstart,proto3" json:"faas_coldstart,omitempty"` + FaasId string `protobuf:"bytes,17,opt,name=faas_id,json=faasId,proto3" json:"faas_id,omitempty"` + FaasName string `protobuf:"bytes,18,opt,name=faas_name,json=faasName,proto3" json:"faas_name,omitempty"` + FaasVersion string `protobuf:"bytes,19,opt,name=faas_version,json=faasVersion,proto3" json:"faas_version,omitempty"` + FaasTriggerType string `protobuf:"bytes,20,opt,name=faas_trigger_type,json=faasTriggerType,proto3" json:"faas_trigger_type,omitempty"` + CloudProvider string `protobuf:"bytes,21,opt,name=cloud_provider,json=cloudProvider,proto3" json:"cloud_provider,omitempty"` + CloudRegion string `protobuf:"bytes,22,opt,name=cloud_region,json=cloudRegion,proto3" json:"cloud_region,omitempty"` + CloudAvailabilityZone string `protobuf:"bytes,23,opt,name=cloud_availability_zone,json=cloudAvailabilityZone,proto3" json:"cloud_availability_zone,omitempty"` + CloudServiceName 
string `protobuf:"bytes,24,opt,name=cloud_service_name,json=cloudServiceName,proto3" json:"cloud_service_name,omitempty"` + CloudAccountId string `protobuf:"bytes,25,opt,name=cloud_account_id,json=cloudAccountId,proto3" json:"cloud_account_id,omitempty"` + CloudAccountName string `protobuf:"bytes,26,opt,name=cloud_account_name,json=cloudAccountName,proto3" json:"cloud_account_name,omitempty"` + CloudMachineType string `protobuf:"bytes,27,opt,name=cloud_machine_type,json=cloudMachineType,proto3" json:"cloud_machine_type,omitempty"` + CloudProjectId string `protobuf:"bytes,28,opt,name=cloud_project_id,json=cloudProjectId,proto3" json:"cloud_project_id,omitempty"` + CloudProjectName string `protobuf:"bytes,29,opt,name=cloud_project_name,json=cloudProjectName,proto3" json:"cloud_project_name,omitempty"` +} + +func (x *TransactionAggregationKey) Reset() { + *x = TransactionAggregationKey{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionAggregationKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionAggregationKey) ProtoMessage() {} + +func (x *TransactionAggregationKey) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionAggregationKey.ProtoReflect.Descriptor instead. +func (*TransactionAggregationKey) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{5} +} + +func (x *TransactionAggregationKey) GetTraceRoot() bool { + if x != nil { + return x.TraceRoot + } + return false +} + +func (x *TransactionAggregationKey) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} + +func (x *TransactionAggregationKey) GetKubernetesPodName() string { + if x != nil { + return x.KubernetesPodName + } + return "" +} + +func (x *TransactionAggregationKey) GetServiceVersion() string { + if x != nil { + return x.ServiceVersion + } + return "" +} + +func (x *TransactionAggregationKey) GetServiceNodeName() string { + if x != nil { + return x.ServiceNodeName + } + return "" +} + +func (x *TransactionAggregationKey) GetServiceRuntimeName() string { + if x != nil { + return x.ServiceRuntimeName + } + return "" +} + +func (x *TransactionAggregationKey) GetServiceRuntimeVersion() string { + if x != nil { + return x.ServiceRuntimeVersion + } + return "" +} + +func (x *TransactionAggregationKey) GetServiceLanguageVersion() string { + if x != nil { + return x.ServiceLanguageVersion + } + return "" +} + +func (x *TransactionAggregationKey) GetHostHostname() string { + if x != nil { + return x.HostHostname + } + return "" +} + +func (x *TransactionAggregationKey) GetHostName() string { + if x != nil { + return x.HostName + } + return "" +} + +func (x *TransactionAggregationKey) GetHostOsPlatform() string { + if x != nil { + return x.HostOsPlatform + } + return "" +} + +func (x *TransactionAggregationKey) GetEventOutcome() string { + if x != nil { + return x.EventOutcome + } + return "" +} + +func (x *TransactionAggregationKey) GetTransactionName() string { + if x != nil { + return x.TransactionName + } + return "" +} + +func (x *TransactionAggregationKey) GetTransactionType() string { + if x != nil { + 
return x.TransactionType + } + return "" +} + +func (x *TransactionAggregationKey) GetTransactionResult() string { + if x != nil { + return x.TransactionResult + } + return "" +} + +func (x *TransactionAggregationKey) GetFaasColdstart() uint32 { + if x != nil { + return x.FaasColdstart + } + return 0 +} + +func (x *TransactionAggregationKey) GetFaasId() string { + if x != nil { + return x.FaasId + } + return "" +} + +func (x *TransactionAggregationKey) GetFaasName() string { + if x != nil { + return x.FaasName + } + return "" +} + +func (x *TransactionAggregationKey) GetFaasVersion() string { + if x != nil { + return x.FaasVersion + } + return "" +} + +func (x *TransactionAggregationKey) GetFaasTriggerType() string { + if x != nil { + return x.FaasTriggerType + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudProvider() string { + if x != nil { + return x.CloudProvider + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudRegion() string { + if x != nil { + return x.CloudRegion + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudAvailabilityZone() string { + if x != nil { + return x.CloudAvailabilityZone + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudServiceName() string { + if x != nil { + return x.CloudServiceName + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudAccountId() string { + if x != nil { + return x.CloudAccountId + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudAccountName() string { + if x != nil { + return x.CloudAccountName + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudMachineType() string { + if x != nil { + return x.CloudMachineType + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudProjectId() string { + if x != nil { + return x.CloudProjectId + } + return "" +} + +func (x *TransactionAggregationKey) GetCloudProjectName() string { + if x != nil { + return x.CloudProjectName + } + return "" +} + +type TransactionMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Histogram *HDRHistogram `protobuf:"bytes,1,opt,name=histogram,proto3" json:"histogram,omitempty"` +} + +func (x *TransactionMetrics) Reset() { + *x = TransactionMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionMetrics) ProtoMessage() {} + +func (x *TransactionMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionMetrics.ProtoReflect.Descriptor instead. 
+func (*TransactionMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{6} +} + +func (x *TransactionMetrics) GetHistogram() *HDRHistogram { + if x != nil { + return x.Histogram + } + return nil +} + +type KeyedServiceTransactionMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *ServiceTransactionAggregationKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Metrics *ServiceTransactionMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *KeyedServiceTransactionMetrics) Reset() { + *x = KeyedServiceTransactionMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyedServiceTransactionMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyedServiceTransactionMetrics) ProtoMessage() {} + +func (x *KeyedServiceTransactionMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyedServiceTransactionMetrics.ProtoReflect.Descriptor instead. +func (*KeyedServiceTransactionMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{7} +} + +func (x *KeyedServiceTransactionMetrics) GetKey() *ServiceTransactionAggregationKey { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyedServiceTransactionMetrics) GetMetrics() *ServiceTransactionMetrics { + if x != nil { + return x.Metrics + } + return nil +} + +type ServiceTransactionAggregationKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionType string `protobuf:"bytes,1,opt,name=transaction_type,json=transactionType,proto3" json:"transaction_type,omitempty"` +} + +func (x *ServiceTransactionAggregationKey) Reset() { + *x = ServiceTransactionAggregationKey{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceTransactionAggregationKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceTransactionAggregationKey) ProtoMessage() {} + +func (x *ServiceTransactionAggregationKey) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceTransactionAggregationKey.ProtoReflect.Descriptor instead. 
+func (*ServiceTransactionAggregationKey) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceTransactionAggregationKey) GetTransactionType() string { + if x != nil { + return x.TransactionType + } + return "" +} + +type ServiceTransactionMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Histogram *HDRHistogram `protobuf:"bytes,1,opt,name=histogram,proto3" json:"histogram,omitempty"` + FailureCount float64 `protobuf:"fixed64,2,opt,name=failure_count,json=failureCount,proto3" json:"failure_count,omitempty"` + SuccessCount float64 `protobuf:"fixed64,3,opt,name=success_count,json=successCount,proto3" json:"success_count,omitempty"` +} + +func (x *ServiceTransactionMetrics) Reset() { + *x = ServiceTransactionMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceTransactionMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceTransactionMetrics) ProtoMessage() {} + +func (x *ServiceTransactionMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceTransactionMetrics.ProtoReflect.Descriptor instead. +func (*ServiceTransactionMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{9} +} + +func (x *ServiceTransactionMetrics) GetHistogram() *HDRHistogram { + if x != nil { + return x.Histogram + } + return nil +} + +func (x *ServiceTransactionMetrics) GetFailureCount() float64 { + if x != nil { + return x.FailureCount + } + return 0 +} + +func (x *ServiceTransactionMetrics) GetSuccessCount() float64 { + if x != nil { + return x.SuccessCount + } + return 0 +} + +type KeyedSpanMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *SpanAggregationKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Metrics *SpanMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` +} + +func (x *KeyedSpanMetrics) Reset() { + *x = KeyedSpanMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyedSpanMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyedSpanMetrics) ProtoMessage() {} + +func (x *KeyedSpanMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyedSpanMetrics.ProtoReflect.Descriptor instead. 
+func (*KeyedSpanMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{10} +} + +func (x *KeyedSpanMetrics) GetKey() *SpanAggregationKey { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyedSpanMetrics) GetMetrics() *SpanMetrics { + if x != nil { + return x.Metrics + } + return nil +} + +type SpanAggregationKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` + Outcome string `protobuf:"bytes,2,opt,name=outcome,proto3" json:"outcome,omitempty"` + TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + TargetName string `protobuf:"bytes,4,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` + Resource string `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *SpanAggregationKey) Reset() { + *x = SpanAggregationKey{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpanAggregationKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanAggregationKey) ProtoMessage() {} + +func (x *SpanAggregationKey) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanAggregationKey.ProtoReflect.Descriptor instead. +func (*SpanAggregationKey) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{11} +} + +func (x *SpanAggregationKey) GetSpanName() string { + if x != nil { + return x.SpanName + } + return "" +} + +func (x *SpanAggregationKey) GetOutcome() string { + if x != nil { + return x.Outcome + } + return "" +} + +func (x *SpanAggregationKey) GetTargetType() string { + if x != nil { + return x.TargetType + } + return "" +} + +func (x *SpanAggregationKey) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +func (x *SpanAggregationKey) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +type SpanMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count float64 `protobuf:"fixed64,1,opt,name=count,proto3" json:"count,omitempty"` + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` +} + +func (x *SpanMetrics) Reset() { + *x = SpanMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpanMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanMetrics) ProtoMessage() {} + +func (x *SpanMetrics) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanMetrics.ProtoReflect.Descriptor instead. 
+func (*SpanMetrics) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{12} +} + +func (x *SpanMetrics) GetCount() float64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *SpanMetrics) GetSum() float64 { + if x != nil { + return x.Sum + } + return 0 +} + +type Overflow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverflowTransactions *TransactionMetrics `protobuf:"bytes,1,opt,name=overflow_transactions,json=overflowTransactions,proto3" json:"overflow_transactions,omitempty"` + OverflowServiceTransactions *ServiceTransactionMetrics `protobuf:"bytes,2,opt,name=overflow_service_transactions,json=overflowServiceTransactions,proto3" json:"overflow_service_transactions,omitempty"` + OverflowSpans *SpanMetrics `protobuf:"bytes,3,opt,name=overflow_spans,json=overflowSpans,proto3" json:"overflow_spans,omitempty"` + OverflowTransactionsEstimator []byte `protobuf:"bytes,4,opt,name=overflow_transactions_estimator,json=overflowTransactionsEstimator,proto3" json:"overflow_transactions_estimator,omitempty"` + OverflowServiceTransactionsEstimator []byte `protobuf:"bytes,5,opt,name=overflow_service_transactions_estimator,json=overflowServiceTransactionsEstimator,proto3" json:"overflow_service_transactions_estimator,omitempty"` + OverflowSpansEstimator []byte `protobuf:"bytes,6,opt,name=overflow_spans_estimator,json=overflowSpansEstimator,proto3" json:"overflow_spans_estimator,omitempty"` +} + +func (x *Overflow) Reset() { + *x = Overflow{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Overflow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Overflow) ProtoMessage() {} + +func (x *Overflow) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Overflow.ProtoReflect.Descriptor instead. 
+func (*Overflow) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{13} +} + +func (x *Overflow) GetOverflowTransactions() *TransactionMetrics { + if x != nil { + return x.OverflowTransactions + } + return nil +} + +func (x *Overflow) GetOverflowServiceTransactions() *ServiceTransactionMetrics { + if x != nil { + return x.OverflowServiceTransactions + } + return nil +} + +func (x *Overflow) GetOverflowSpans() *SpanMetrics { + if x != nil { + return x.OverflowSpans + } + return nil +} + +func (x *Overflow) GetOverflowTransactionsEstimator() []byte { + if x != nil { + return x.OverflowTransactionsEstimator + } + return nil +} + +func (x *Overflow) GetOverflowServiceTransactionsEstimator() []byte { + if x != nil { + return x.OverflowServiceTransactionsEstimator + } + return nil +} + +func (x *Overflow) GetOverflowSpansEstimator() []byte { + if x != nil { + return x.OverflowSpansEstimator + } + return nil +} + +type HDRHistogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LowestTrackableValue int64 `protobuf:"varint,1,opt,name=lowest_trackable_value,json=lowestTrackableValue,proto3" json:"lowest_trackable_value,omitempty"` + HighestTrackableValue int64 `protobuf:"varint,2,opt,name=highest_trackable_value,json=highestTrackableValue,proto3" json:"highest_trackable_value,omitempty"` + SignificantFigures int64 `protobuf:"varint,3,opt,name=significant_figures,json=significantFigures,proto3" json:"significant_figures,omitempty"` + Counts []int64 `protobuf:"varint,4,rep,packed,name=counts,proto3" json:"counts,omitempty"` + Buckets []int32 `protobuf:"varint,5,rep,packed,name=buckets,proto3" json:"buckets,omitempty"` +} + +func (x *HDRHistogram) Reset() { + *x = HDRHistogram{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_aggregation_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HDRHistogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HDRHistogram) ProtoMessage() {} + +func (x *HDRHistogram) ProtoReflect() protoreflect.Message { + mi := &file_proto_aggregation_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HDRHistogram.ProtoReflect.Descriptor instead. 
+func (*HDRHistogram) Descriptor() ([]byte, []int) { + return file_proto_aggregation_proto_rawDescGZIP(), []int{14} +} + +func (x *HDRHistogram) GetLowestTrackableValue() int64 { + if x != nil { + return x.LowestTrackableValue + } + return 0 +} + +func (x *HDRHistogram) GetHighestTrackableValue() int64 { + if x != nil { + return x.HighestTrackableValue + } + return 0 +} + +func (x *HDRHistogram) GetSignificantFigures() int64 { + if x != nil { + return x.SignificantFigures + } + return 0 +} + +func (x *HDRHistogram) GetCounts() []int64 { + if x != nil { + return x.Counts + } + return nil +} + +func (x *HDRHistogram) GetBuckets() []int32 { + if x != nil { + return x.Buckets + } + return nil +} + +var File_proto_aggregation_proto protoreflect.FileDescriptor + +var file_proto_aggregation_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0xc3, 0x02, 0x0a, 0x0f, 0x43, 0x6f, + 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, + 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x45, 0x0a, 0x11, 0x6f, + 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, + 0x52, 0x10, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1b, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x19, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, + 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, + 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x38, 0x0a, 0x18, 0x79, 0x6f, 0x75, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x79, 0x6f, 0x75, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, + 0x88, 0x01, 0x0a, 0x13, 0x4b, 0x65, 0x79, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x37, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, + 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 
0x65, 0x79, + 0x12, 0x38, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x88, 0x02, 0x0a, 0x15, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x76, 0x69, 0x72, + 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x67, + 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x53, 0x74, 0x72, 0x22, 0xe2, 0x02, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6f, 0x76, 0x65, 0x72, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x0e, 0x6f, 0x76, 0x65, + 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x58, 0x0a, 0x13, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x65, 0x64, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x6e, 0x0a, 0x1b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6c, 0x61, + 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x65, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 
0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x19, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x43, 0x0a, 0x0c, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6c, + 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, + 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x73, + 0x70, 0x61, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x94, 0x01, 0x0a, 0x17, 0x4b, + 0x65, 0x79, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3b, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, + 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x22, 0xd6, 0x09, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x5f, + 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x6b, 0x75, 0x62, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x50, 0x6f, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x52, 0x75, 0x6e, 0x74, 
0x69, 0x6d, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x38, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, + 0x73, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6f, 0x73, 0x5f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x6f, 0x73, 0x74, 0x4f, 0x73, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x6f, 0x75, 0x74, 0x63, 0x6f, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x4f, 0x75, 0x74, 0x63, 0x6f, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x61, 0x61, 0x73, 0x5f, 0x63, 0x6f, 0x6c, 0x64, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x66, 0x61, 0x61, 0x73, 0x43, 0x6f, + 0x6c, 0x64, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x61, 0x61, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x61, 0x61, 0x73, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x61, 0x61, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x61, 0x61, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x61, 0x61, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x61, 0x61, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x61, 0x61, 0x73, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x61, 0x61, + 0x73, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x65, 0x67, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, + 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x7a, 0x6f, 0x6e, + 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x41, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x6d, 0x61, + 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x12, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x12, 0x3a, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x44, 0x52, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x22, 0xa9, 0x01, 0x0a, + 0x1e, 0x4b, 0x65, 0x79, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, + 0x42, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, + 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x43, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, + 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 
0x73, 0x52, + 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x4d, 0x0a, 0x20, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x67, + 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa1, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x3a, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, + 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x44, 0x52, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x7f, 0x0a, 0x10, 0x4b, + 0x65, 0x79, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, + 0x34, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, + 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xa9, 0x01, 0x0a, + 0x12, 0x53, 0x70, 0x61, 0x6e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x63, 0x6f, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x63, 0x6f, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x35, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 
0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x22, + 0xef, 0x03, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x57, 0x0a, 0x15, + 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6c, + 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, + 0x14, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6d, 0x0a, 0x1d, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, + 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x1b, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, + 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, + 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0d, 0x6f, 0x76, 0x65, 0x72, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x1f, 0x6f, 0x76, 0x65, 0x72, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x5f, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x1d, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x55, 0x0a, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x5f, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x24, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x73, + 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x18, 0x6f, 0x76, 0x65, 0x72, 0x66, + 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x5f, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, + 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x6f, 0x76, 0x65, 0x72, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x6f, + 0x72, 0x22, 0xdf, 0x01, 0x0a, 0x0c, 0x48, 0x44, 0x52, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x77, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, + 0x63, 0x6b, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x14, 0x6c, 0x6f, 
0x77, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x61, + 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x68, 0x69, 0x67, 0x68, + 0x65, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x68, 0x69, 0x67, 0x68, 0x65, + 0x73, 0x74, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2f, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x66, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x5f, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x73, + 0x69, 0x67, 0x6e, 0x69, 0x66, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x46, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x03, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x42, 0x13, 0x48, 0x01, 0x5a, 0x0f, 0x2e, 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_aggregation_proto_rawDescOnce sync.Once + file_proto_aggregation_proto_rawDescData = file_proto_aggregation_proto_rawDesc +) + +func file_proto_aggregation_proto_rawDescGZIP() []byte { + file_proto_aggregation_proto_rawDescOnce.Do(func() { + file_proto_aggregation_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_aggregation_proto_rawDescData) + }) + return file_proto_aggregation_proto_rawDescData +} + +var file_proto_aggregation_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_proto_aggregation_proto_goTypes = []interface{}{ + (*CombinedMetrics)(nil), // 0: elastic.apm.v1.CombinedMetrics + (*KeyedServiceMetrics)(nil), // 1: elastic.apm.v1.KeyedServiceMetrics + (*ServiceAggregationKey)(nil), // 2: elastic.apm.v1.ServiceAggregationKey + (*ServiceMetrics)(nil), // 3: elastic.apm.v1.ServiceMetrics + (*KeyedTransactionMetrics)(nil), // 4: elastic.apm.v1.KeyedTransactionMetrics + (*TransactionAggregationKey)(nil), // 5: elastic.apm.v1.TransactionAggregationKey + (*TransactionMetrics)(nil), // 6: elastic.apm.v1.TransactionMetrics + (*KeyedServiceTransactionMetrics)(nil), // 7: elastic.apm.v1.KeyedServiceTransactionMetrics + (*ServiceTransactionAggregationKey)(nil), // 8: elastic.apm.v1.ServiceTransactionAggregationKey + (*ServiceTransactionMetrics)(nil), // 9: elastic.apm.v1.ServiceTransactionMetrics + (*KeyedSpanMetrics)(nil), // 10: elastic.apm.v1.KeyedSpanMetrics + (*SpanAggregationKey)(nil), // 11: elastic.apm.v1.SpanAggregationKey + (*SpanMetrics)(nil), // 12: elastic.apm.v1.SpanMetrics + (*Overflow)(nil), // 13: elastic.apm.v1.Overflow + (*HDRHistogram)(nil), // 14: elastic.apm.v1.HDRHistogram +} +var file_proto_aggregation_proto_depIdxs = []int32{ + 1, // 0: elastic.apm.v1.CombinedMetrics.service_metrics:type_name -> elastic.apm.v1.KeyedServiceMetrics + 13, // 1: elastic.apm.v1.CombinedMetrics.overflow_services:type_name -> elastic.apm.v1.Overflow + 2, // 2: elastic.apm.v1.KeyedServiceMetrics.key:type_name -> elastic.apm.v1.ServiceAggregationKey + 3, // 3: elastic.apm.v1.KeyedServiceMetrics.metrics:type_name -> elastic.apm.v1.ServiceMetrics + 13, // 4: elastic.apm.v1.ServiceMetrics.overflow_groups:type_name -> elastic.apm.v1.Overflow + 4, // 5: elastic.apm.v1.ServiceMetrics.transaction_metrics:type_name -> 
elastic.apm.v1.KeyedTransactionMetrics + 7, // 6: elastic.apm.v1.ServiceMetrics.service_transaction_metrics:type_name -> elastic.apm.v1.KeyedServiceTransactionMetrics + 10, // 7: elastic.apm.v1.ServiceMetrics.span_metrics:type_name -> elastic.apm.v1.KeyedSpanMetrics + 5, // 8: elastic.apm.v1.KeyedTransactionMetrics.key:type_name -> elastic.apm.v1.TransactionAggregationKey + 6, // 9: elastic.apm.v1.KeyedTransactionMetrics.metrics:type_name -> elastic.apm.v1.TransactionMetrics + 14, // 10: elastic.apm.v1.TransactionMetrics.histogram:type_name -> elastic.apm.v1.HDRHistogram + 8, // 11: elastic.apm.v1.KeyedServiceTransactionMetrics.key:type_name -> elastic.apm.v1.ServiceTransactionAggregationKey + 9, // 12: elastic.apm.v1.KeyedServiceTransactionMetrics.metrics:type_name -> elastic.apm.v1.ServiceTransactionMetrics + 14, // 13: elastic.apm.v1.ServiceTransactionMetrics.histogram:type_name -> elastic.apm.v1.HDRHistogram + 11, // 14: elastic.apm.v1.KeyedSpanMetrics.key:type_name -> elastic.apm.v1.SpanAggregationKey + 12, // 15: elastic.apm.v1.KeyedSpanMetrics.metrics:type_name -> elastic.apm.v1.SpanMetrics + 6, // 16: elastic.apm.v1.Overflow.overflow_transactions:type_name -> elastic.apm.v1.TransactionMetrics + 9, // 17: elastic.apm.v1.Overflow.overflow_service_transactions:type_name -> elastic.apm.v1.ServiceTransactionMetrics + 12, // 18: elastic.apm.v1.Overflow.overflow_spans:type_name -> elastic.apm.v1.SpanMetrics + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_proto_aggregation_proto_init() } +func file_proto_aggregation_proto_init() { + if File_proto_aggregation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_aggregation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CombinedMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyedServiceMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceAggregationKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyedTransactionMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionAggregationKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[6].Exporter 
= func(v interface{}, i int) interface{} { + switch v := v.(*TransactionMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyedServiceTransactionMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceTransactionAggregationKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceTransactionMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyedSpanMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpanAggregationKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpanMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Overflow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_aggregation_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HDRHistogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_aggregation_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_aggregation_proto_goTypes, + DependencyIndexes: file_proto_aggregation_proto_depIdxs, + MessageInfos: file_proto_aggregation_proto_msgTypes, + }.Build() + File_proto_aggregation_proto = out.File + file_proto_aggregation_proto_rawDesc = nil + file_proto_aggregation_proto_goTypes = nil + file_proto_aggregation_proto_depIdxs = nil +} diff --git a/copy/apm-aggregation/aggregationpb/aggregation_vtproto.pb.go b/copy/apm-aggregation/aggregationpb/aggregation_vtproto.pb.go new file mode 100644 index 00000000000..c1ee4429003 --- /dev/null +++ b/copy/apm-aggregation/aggregationpb/aggregation_vtproto.pb.go @@ -0,0 +1,5590 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.5.0 +// source: proto/aggregation.proto + +package aggregationpb + +import ( + binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + bits "math/bits" + sync "sync" + + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *CombinedMetrics) CloneVT() *CombinedMetrics { + if m == nil { + return (*CombinedMetrics)(nil) + } + r := &CombinedMetrics{ + OverflowServices: m.OverflowServices.CloneVT(), + EventsTotal: m.EventsTotal, + YoungestEventTimestamp: m.YoungestEventTimestamp, + } + if rhs := m.ServiceMetrics; rhs != nil { + tmpContainer := make([]*KeyedServiceMetrics, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServiceMetrics = tmpContainer + } + if rhs := m.OverflowServicesEstimator; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.OverflowServicesEstimator = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CombinedMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyedServiceMetrics) CloneVT() *KeyedServiceMetrics { + if m == nil { + return (*KeyedServiceMetrics)(nil) + } + r := &KeyedServiceMetrics{ + Key: m.Key.CloneVT(), + Metrics: m.Metrics.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyedServiceMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ServiceAggregationKey) CloneVT() *ServiceAggregationKey { + if m == nil { + return (*ServiceAggregationKey)(nil) + } + r := &ServiceAggregationKey{ + Timestamp: m.Timestamp, + ServiceName: m.ServiceName, + ServiceEnvironment: m.ServiceEnvironment, + ServiceLanguageName: m.ServiceLanguageName, + AgentName: m.AgentName, + } + if rhs := m.GlobalLabelsStr; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.GlobalLabelsStr = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ServiceAggregationKey) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ServiceMetrics) CloneVT() *ServiceMetrics { + if m == nil { + return (*ServiceMetrics)(nil) + } + r := &ServiceMetrics{ + OverflowGroups: m.OverflowGroups.CloneVT(), + } + if rhs := m.TransactionMetrics; rhs != nil { + tmpContainer := make([]*KeyedTransactionMetrics, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TransactionMetrics = tmpContainer + } + if rhs := m.ServiceTransactionMetrics; rhs != nil { + tmpContainer := make([]*KeyedServiceTransactionMetrics, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServiceTransactionMetrics = tmpContainer + } + if rhs := m.SpanMetrics; rhs != nil { + tmpContainer := make([]*KeyedSpanMetrics, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SpanMetrics = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ServiceMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyedTransactionMetrics) CloneVT() *KeyedTransactionMetrics { + if m == nil { + return (*KeyedTransactionMetrics)(nil) + } + r := &KeyedTransactionMetrics{ + Key: m.Key.CloneVT(), + Metrics: m.Metrics.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyedTransactionMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TransactionAggregationKey) CloneVT() *TransactionAggregationKey { + if m == nil { + return (*TransactionAggregationKey)(nil) + } + r := &TransactionAggregationKey{ + TraceRoot: m.TraceRoot, + ContainerId: m.ContainerId, + KubernetesPodName: m.KubernetesPodName, + ServiceVersion: m.ServiceVersion, + ServiceNodeName: m.ServiceNodeName, + ServiceRuntimeName: m.ServiceRuntimeName, + ServiceRuntimeVersion: m.ServiceRuntimeVersion, + ServiceLanguageVersion: m.ServiceLanguageVersion, + HostHostname: m.HostHostname, + HostName: m.HostName, + HostOsPlatform: m.HostOsPlatform, + EventOutcome: m.EventOutcome, + TransactionName: m.TransactionName, + TransactionType: m.TransactionType, + TransactionResult: m.TransactionResult, + FaasColdstart: m.FaasColdstart, + FaasId: m.FaasId, + FaasName: m.FaasName, + FaasVersion: m.FaasVersion, + FaasTriggerType: m.FaasTriggerType, + CloudProvider: m.CloudProvider, + CloudRegion: m.CloudRegion, + CloudAvailabilityZone: m.CloudAvailabilityZone, + CloudServiceName: m.CloudServiceName, + CloudAccountId: m.CloudAccountId, + CloudAccountName: m.CloudAccountName, + CloudMachineType: m.CloudMachineType, + CloudProjectId: m.CloudProjectId, + CloudProjectName: m.CloudProjectName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TransactionAggregationKey) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TransactionMetrics) CloneVT() *TransactionMetrics { + if m == nil { + return (*TransactionMetrics)(nil) + } + r := &TransactionMetrics{ + Histogram: m.Histogram.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TransactionMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyedServiceTransactionMetrics) CloneVT() *KeyedServiceTransactionMetrics { + if m == nil { + return (*KeyedServiceTransactionMetrics)(nil) + } + r := &KeyedServiceTransactionMetrics{ + Key: m.Key.CloneVT(), + Metrics: m.Metrics.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyedServiceTransactionMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ServiceTransactionAggregationKey) CloneVT() *ServiceTransactionAggregationKey { + if m == nil { + return (*ServiceTransactionAggregationKey)(nil) + } + r := &ServiceTransactionAggregationKey{ + TransactionType: m.TransactionType, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ServiceTransactionAggregationKey) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*ServiceTransactionMetrics) CloneVT() *ServiceTransactionMetrics { + if m == nil { + return (*ServiceTransactionMetrics)(nil) + } + r := &ServiceTransactionMetrics{ + Histogram: m.Histogram.CloneVT(), + FailureCount: m.FailureCount, + SuccessCount: m.SuccessCount, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ServiceTransactionMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyedSpanMetrics) CloneVT() *KeyedSpanMetrics { + if m == nil { + return (*KeyedSpanMetrics)(nil) + } + r := &KeyedSpanMetrics{ + Key: m.Key.CloneVT(), + Metrics: m.Metrics.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyedSpanMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SpanAggregationKey) CloneVT() *SpanAggregationKey { + if m == nil { + return (*SpanAggregationKey)(nil) + } + r := &SpanAggregationKey{ + SpanName: m.SpanName, + Outcome: m.Outcome, + TargetType: m.TargetType, + TargetName: m.TargetName, + Resource: m.Resource, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SpanAggregationKey) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SpanMetrics) CloneVT() *SpanMetrics { + if m == nil { + return (*SpanMetrics)(nil) + } + r := &SpanMetrics{ + Count: m.Count, + Sum: m.Sum, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SpanMetrics) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Overflow) CloneVT() *Overflow { + if m == nil { + return (*Overflow)(nil) + } + r := &Overflow{ + OverflowTransactions: m.OverflowTransactions.CloneVT(), + OverflowServiceTransactions: m.OverflowServiceTransactions.CloneVT(), + OverflowSpans: m.OverflowSpans.CloneVT(), + } + if rhs := m.OverflowTransactionsEstimator; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.OverflowTransactionsEstimator = tmpBytes + } + if rhs := m.OverflowServiceTransactionsEstimator; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.OverflowServiceTransactionsEstimator = tmpBytes + } + if rhs := m.OverflowSpansEstimator; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.OverflowSpansEstimator = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Overflow) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *HDRHistogram) CloneVT() *HDRHistogram { + if m == nil { + return (*HDRHistogram)(nil) + } + r := &HDRHistogram{ + LowestTrackableValue: m.LowestTrackableValue, + HighestTrackableValue: m.HighestTrackableValue, + SignificantFigures: m.SignificantFigures, + } + if rhs := m.Counts; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.Counts = tmpContainer + } + if rhs := m.Buckets; rhs != nil { + tmpContainer := make([]int32, len(rhs)) + copy(tmpContainer, rhs) + r.Buckets = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m 
*HDRHistogram) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CombinedMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CombinedMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CombinedMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.YoungestEventTimestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.YoungestEventTimestamp)) + i-- + dAtA[i] = 0x28 + } + if m.EventsTotal != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.EventsTotal)))) + i-- + dAtA[i] = 0x21 + } + if len(m.OverflowServicesEstimator) > 0 { + i -= len(m.OverflowServicesEstimator) + copy(dAtA[i:], m.OverflowServicesEstimator) + i = encodeVarint(dAtA, i, uint64(len(m.OverflowServicesEstimator))) + i-- + dAtA[i] = 0x1a + } + if m.OverflowServices != nil { + size, err := m.OverflowServices.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ServiceMetrics) > 0 { + for iNdEx := len(m.ServiceMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ServiceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyedServiceMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyedServiceMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyedServiceMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metrics != nil { + size, err := m.Metrics.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceAggregationKey) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAggregationKey) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ServiceAggregationKey) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if len(m.GlobalLabelsStr) > 0 { + i -= len(m.GlobalLabelsStr) + copy(dAtA[i:], m.GlobalLabelsStr) + i = encodeVarint(dAtA, i, uint64(len(m.GlobalLabelsStr))) + i-- + dAtA[i] = 0x32 + } + if len(m.AgentName) > 0 { + i -= len(m.AgentName) + copy(dAtA[i:], m.AgentName) + i = encodeVarint(dAtA, i, uint64(len(m.AgentName))) + i-- + dAtA[i] = 0x2a + } + if len(m.ServiceLanguageName) > 0 { + i -= len(m.ServiceLanguageName) + copy(dAtA[i:], m.ServiceLanguageName) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceLanguageName))) + i-- + dAtA[i] = 0x22 + } + if len(m.ServiceEnvironment) > 0 { + i -= len(m.ServiceEnvironment) + copy(dAtA[i:], m.ServiceEnvironment) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceEnvironment))) + i-- + dAtA[i] = 0x1a + } + if len(m.ServiceName) > 0 { + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ServiceMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ServiceMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SpanMetrics) > 0 { + for iNdEx := len(m.SpanMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SpanMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.ServiceTransactionMetrics) > 0 { + for iNdEx := len(m.ServiceTransactionMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ServiceTransactionMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.TransactionMetrics) > 0 { + for iNdEx := len(m.TransactionMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TransactionMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.OverflowGroups != nil { + size, err := m.OverflowGroups.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyedTransactionMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyedTransactionMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyedTransactionMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metrics != nil { + size, err := m.Metrics.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TransactionAggregationKey) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransactionAggregationKey) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TransactionAggregationKey) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CloudProjectName) > 0 { + i -= len(m.CloudProjectName) + copy(dAtA[i:], m.CloudProjectName) + i = encodeVarint(dAtA, i, uint64(len(m.CloudProjectName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.CloudProjectId) > 0 { + i -= len(m.CloudProjectId) + copy(dAtA[i:], m.CloudProjectId) + i = encodeVarint(dAtA, i, uint64(len(m.CloudProjectId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if len(m.CloudMachineType) > 0 { + i -= len(m.CloudMachineType) + copy(dAtA[i:], m.CloudMachineType) + i = encodeVarint(dAtA, i, uint64(len(m.CloudMachineType))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.CloudAccountName) > 0 { + i -= len(m.CloudAccountName) + copy(dAtA[i:], m.CloudAccountName) + i = encodeVarint(dAtA, i, uint64(len(m.CloudAccountName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if len(m.CloudAccountId) > 0 { + i -= len(m.CloudAccountId) + copy(dAtA[i:], m.CloudAccountId) + i = encodeVarint(dAtA, i, uint64(len(m.CloudAccountId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.CloudServiceName) > 0 { + i -= len(m.CloudServiceName) + copy(dAtA[i:], m.CloudServiceName) + i = encodeVarint(dAtA, i, uint64(len(m.CloudServiceName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.CloudAvailabilityZone) > 0 { + i -= len(m.CloudAvailabilityZone) + copy(dAtA[i:], m.CloudAvailabilityZone) + i = encodeVarint(dAtA, i, uint64(len(m.CloudAvailabilityZone))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.CloudRegion) > 0 { + i -= len(m.CloudRegion) + copy(dAtA[i:], m.CloudRegion) + i = encodeVarint(dAtA, i, uint64(len(m.CloudRegion))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.CloudProvider) > 0 { + i -= len(m.CloudProvider) + copy(dAtA[i:], m.CloudProvider) + i = encodeVarint(dAtA, i, uint64(len(m.CloudProvider))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if len(m.FaasTriggerType) > 0 { + i -= len(m.FaasTriggerType) + copy(dAtA[i:], m.FaasTriggerType) + i = encodeVarint(dAtA, i, uint64(len(m.FaasTriggerType))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if len(m.FaasVersion) > 0 { + i -= len(m.FaasVersion) + copy(dAtA[i:], m.FaasVersion) + i = encodeVarint(dAtA, i, uint64(len(m.FaasVersion))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.FaasName) > 0 { + i -= len(m.FaasName) + copy(dAtA[i:], m.FaasName) + i 
= encodeVarint(dAtA, i, uint64(len(m.FaasName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.FaasId) > 0 { + i -= len(m.FaasId) + copy(dAtA[i:], m.FaasId) + i = encodeVarint(dAtA, i, uint64(len(m.FaasId))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.FaasColdstart != 0 { + i = encodeVarint(dAtA, i, uint64(m.FaasColdstart)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if len(m.TransactionResult) > 0 { + i -= len(m.TransactionResult) + copy(dAtA[i:], m.TransactionResult) + i = encodeVarint(dAtA, i, uint64(len(m.TransactionResult))) + i-- + dAtA[i] = 0x7a + } + if len(m.TransactionType) > 0 { + i -= len(m.TransactionType) + copy(dAtA[i:], m.TransactionType) + i = encodeVarint(dAtA, i, uint64(len(m.TransactionType))) + i-- + dAtA[i] = 0x72 + } + if len(m.TransactionName) > 0 { + i -= len(m.TransactionName) + copy(dAtA[i:], m.TransactionName) + i = encodeVarint(dAtA, i, uint64(len(m.TransactionName))) + i-- + dAtA[i] = 0x6a + } + if len(m.EventOutcome) > 0 { + i -= len(m.EventOutcome) + copy(dAtA[i:], m.EventOutcome) + i = encodeVarint(dAtA, i, uint64(len(m.EventOutcome))) + i-- + dAtA[i] = 0x62 + } + if len(m.HostOsPlatform) > 0 { + i -= len(m.HostOsPlatform) + copy(dAtA[i:], m.HostOsPlatform) + i = encodeVarint(dAtA, i, uint64(len(m.HostOsPlatform))) + i-- + dAtA[i] = 0x5a + } + if len(m.HostName) > 0 { + i -= len(m.HostName) + copy(dAtA[i:], m.HostName) + i = encodeVarint(dAtA, i, uint64(len(m.HostName))) + i-- + dAtA[i] = 0x52 + } + if len(m.HostHostname) > 0 { + i -= len(m.HostHostname) + copy(dAtA[i:], m.HostHostname) + i = encodeVarint(dAtA, i, uint64(len(m.HostHostname))) + i-- + dAtA[i] = 0x4a + } + if len(m.ServiceLanguageVersion) > 0 { + i -= len(m.ServiceLanguageVersion) + copy(dAtA[i:], m.ServiceLanguageVersion) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceLanguageVersion))) + i-- + dAtA[i] = 0x42 + } + if len(m.ServiceRuntimeVersion) > 0 { + i -= len(m.ServiceRuntimeVersion) + copy(dAtA[i:], m.ServiceRuntimeVersion) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceRuntimeVersion))) + i-- + dAtA[i] = 0x3a + } + if len(m.ServiceRuntimeName) > 0 { + i -= len(m.ServiceRuntimeName) + copy(dAtA[i:], m.ServiceRuntimeName) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceRuntimeName))) + i-- + dAtA[i] = 0x32 + } + if len(m.ServiceNodeName) > 0 { + i -= len(m.ServiceNodeName) + copy(dAtA[i:], m.ServiceNodeName) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceNodeName))) + i-- + dAtA[i] = 0x2a + } + if len(m.ServiceVersion) > 0 { + i -= len(m.ServiceVersion) + copy(dAtA[i:], m.ServiceVersion) + i = encodeVarint(dAtA, i, uint64(len(m.ServiceVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.KubernetesPodName) > 0 { + i -= len(m.KubernetesPodName) + copy(dAtA[i:], m.KubernetesPodName) + i = encodeVarint(dAtA, i, uint64(len(m.KubernetesPodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarint(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0x12 + } + if m.TraceRoot { + i-- + if m.TraceRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TransactionMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransactionMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TransactionMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Histogram != nil { + size, err := m.Histogram.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyedServiceTransactionMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyedServiceTransactionMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyedServiceTransactionMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metrics != nil { + size, err := m.Metrics.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceTransactionAggregationKey) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceTransactionAggregationKey) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ServiceTransactionAggregationKey) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TransactionType) > 0 { + i -= len(m.TransactionType) + copy(dAtA[i:], m.TransactionType) + i = encodeVarint(dAtA, i, uint64(len(m.TransactionType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceTransactionMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceTransactionMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ServiceTransactionMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SuccessCount != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SuccessCount)))) + i-- + dAtA[i] = 0x19 + } + if m.FailureCount != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(math.Float64bits(float64(m.FailureCount)))) + i-- + dAtA[i] = 0x11 + } + if m.Histogram != nil { + size, err := m.Histogram.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyedSpanMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyedSpanMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyedSpanMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metrics != nil { + size, err := m.Metrics.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Key != nil { + size, err := m.Key.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SpanAggregationKey) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanAggregationKey) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SpanAggregationKey) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Resource) > 0 { + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarint(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + } + if len(m.TargetName) > 0 { + i -= len(m.TargetName) + copy(dAtA[i:], m.TargetName) + i = encodeVarint(dAtA, i, uint64(len(m.TargetName))) + i-- + dAtA[i] = 0x22 + } + if len(m.TargetType) > 0 { + i -= len(m.TargetType) + copy(dAtA[i:], m.TargetType) + i = encodeVarint(dAtA, i, uint64(len(m.TargetType))) + i-- + dAtA[i] = 0x1a + } + if len(m.Outcome) > 0 { + i -= len(m.Outcome) + copy(dAtA[i:], m.Outcome) + i = encodeVarint(dAtA, i, uint64(len(m.Outcome))) + i-- + dAtA[i] = 0x12 + } + if len(m.SpanName) > 0 { + i -= len(m.SpanName) + copy(dAtA[i:], m.SpanName) + i = encodeVarint(dAtA, i, uint64(len(m.SpanName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SpanMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SpanMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.Sum != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x11 + } + if m.Count != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Count)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Overflow) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overflow) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Overflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.OverflowSpansEstimator) > 0 { + i -= len(m.OverflowSpansEstimator) + copy(dAtA[i:], m.OverflowSpansEstimator) + i = encodeVarint(dAtA, i, uint64(len(m.OverflowSpansEstimator))) + i-- + dAtA[i] = 0x32 + } + if len(m.OverflowServiceTransactionsEstimator) > 0 { + i -= len(m.OverflowServiceTransactionsEstimator) + copy(dAtA[i:], m.OverflowServiceTransactionsEstimator) + i = encodeVarint(dAtA, i, uint64(len(m.OverflowServiceTransactionsEstimator))) + i-- + dAtA[i] = 0x2a + } + if len(m.OverflowTransactionsEstimator) > 0 { + i -= len(m.OverflowTransactionsEstimator) + copy(dAtA[i:], m.OverflowTransactionsEstimator) + i = encodeVarint(dAtA, i, uint64(len(m.OverflowTransactionsEstimator))) + i-- + dAtA[i] = 0x22 + } + if m.OverflowSpans != nil { + size, err := m.OverflowSpans.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.OverflowServiceTransactions != nil { + size, err := m.OverflowServiceTransactions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.OverflowTransactions != nil { + size, err := m.OverflowTransactions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HDRHistogram) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HDRHistogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HDRHistogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Buckets) > 0 { + var pksize2 int + for _, num := range m.Buckets { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.Buckets { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x2a + } + if len(m.Counts) > 0 { + var pksize4 int + for _, num := range m.Counts 
{ + pksize4 += sov(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num1 := range m.Counts { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA[j3] = uint8(num) + j3++ + } + i = encodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x22 + } + if m.SignificantFigures != 0 { + i = encodeVarint(dAtA, i, uint64(m.SignificantFigures)) + i-- + dAtA[i] = 0x18 + } + if m.HighestTrackableValue != 0 { + i = encodeVarint(dAtA, i, uint64(m.HighestTrackableValue)) + i-- + dAtA[i] = 0x10 + } + if m.LowestTrackableValue != 0 { + i = encodeVarint(dAtA, i, uint64(m.LowestTrackableValue)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +var vtprotoPool_CombinedMetrics = sync.Pool{ + New: func() interface{} { + return &CombinedMetrics{} + }, +} + +func (m *CombinedMetrics) ResetVT() { + for _, mm := range m.ServiceMetrics { + mm.ResetVT() + } + f0 := m.ServiceMetrics[:0] + m.OverflowServices.ReturnToVTPool() + f1 := m.OverflowServicesEstimator[:0] + m.Reset() + m.ServiceMetrics = f0 + m.OverflowServicesEstimator = f1 +} +func (m *CombinedMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_CombinedMetrics.Put(m) + } +} +func CombinedMetricsFromVTPool() *CombinedMetrics { + return vtprotoPool_CombinedMetrics.Get().(*CombinedMetrics) +} + +var vtprotoPool_KeyedServiceMetrics = sync.Pool{ + New: func() interface{} { + return &KeyedServiceMetrics{} + }, +} + +func (m *KeyedServiceMetrics) ResetVT() { + m.Key.ReturnToVTPool() + m.Metrics.ReturnToVTPool() + m.Reset() +} +func (m *KeyedServiceMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_KeyedServiceMetrics.Put(m) + } +} +func KeyedServiceMetricsFromVTPool() *KeyedServiceMetrics { + return vtprotoPool_KeyedServiceMetrics.Get().(*KeyedServiceMetrics) +} + +var vtprotoPool_ServiceAggregationKey = sync.Pool{ + New: func() interface{} { + return &ServiceAggregationKey{} + }, +} + +func (m *ServiceAggregationKey) ResetVT() { + f0 := m.GlobalLabelsStr[:0] + m.Reset() + m.GlobalLabelsStr = f0 +} +func (m *ServiceAggregationKey) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_ServiceAggregationKey.Put(m) + } +} +func ServiceAggregationKeyFromVTPool() *ServiceAggregationKey { + return vtprotoPool_ServiceAggregationKey.Get().(*ServiceAggregationKey) +} + +var vtprotoPool_ServiceMetrics = sync.Pool{ + New: func() interface{} { + return &ServiceMetrics{} + }, +} + +func (m *ServiceMetrics) ResetVT() { + m.OverflowGroups.ReturnToVTPool() + for _, mm := range m.TransactionMetrics { + mm.ResetVT() + } + f0 := m.TransactionMetrics[:0] + for _, mm := range m.ServiceTransactionMetrics { + mm.ResetVT() + } + f1 := m.ServiceTransactionMetrics[:0] + for _, mm := range m.SpanMetrics { + mm.ResetVT() + } + f2 := m.SpanMetrics[:0] + m.Reset() + m.TransactionMetrics = f0 + m.ServiceTransactionMetrics = f1 + m.SpanMetrics = f2 +} +func (m *ServiceMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_ServiceMetrics.Put(m) + } +} +func ServiceMetricsFromVTPool() *ServiceMetrics { + return vtprotoPool_ServiceMetrics.Get().(*ServiceMetrics) +} + +var vtprotoPool_KeyedTransactionMetrics = sync.Pool{ + New: func() interface{} { + return &KeyedTransactionMetrics{} + }, +} + +func (m *KeyedTransactionMetrics) ResetVT() { + 
m.Key.ReturnToVTPool() + m.Metrics.ReturnToVTPool() + m.Reset() +} +func (m *KeyedTransactionMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_KeyedTransactionMetrics.Put(m) + } +} +func KeyedTransactionMetricsFromVTPool() *KeyedTransactionMetrics { + return vtprotoPool_KeyedTransactionMetrics.Get().(*KeyedTransactionMetrics) +} + +var vtprotoPool_TransactionAggregationKey = sync.Pool{ + New: func() interface{} { + return &TransactionAggregationKey{} + }, +} + +func (m *TransactionAggregationKey) ResetVT() { + m.Reset() +} +func (m *TransactionAggregationKey) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_TransactionAggregationKey.Put(m) + } +} +func TransactionAggregationKeyFromVTPool() *TransactionAggregationKey { + return vtprotoPool_TransactionAggregationKey.Get().(*TransactionAggregationKey) +} + +var vtprotoPool_TransactionMetrics = sync.Pool{ + New: func() interface{} { + return &TransactionMetrics{} + }, +} + +func (m *TransactionMetrics) ResetVT() { + m.Histogram.ReturnToVTPool() + m.Reset() +} +func (m *TransactionMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_TransactionMetrics.Put(m) + } +} +func TransactionMetricsFromVTPool() *TransactionMetrics { + return vtprotoPool_TransactionMetrics.Get().(*TransactionMetrics) +} + +var vtprotoPool_KeyedServiceTransactionMetrics = sync.Pool{ + New: func() interface{} { + return &KeyedServiceTransactionMetrics{} + }, +} + +func (m *KeyedServiceTransactionMetrics) ResetVT() { + m.Key.ReturnToVTPool() + m.Metrics.ReturnToVTPool() + m.Reset() +} +func (m *KeyedServiceTransactionMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_KeyedServiceTransactionMetrics.Put(m) + } +} +func KeyedServiceTransactionMetricsFromVTPool() *KeyedServiceTransactionMetrics { + return vtprotoPool_KeyedServiceTransactionMetrics.Get().(*KeyedServiceTransactionMetrics) +} + +var vtprotoPool_ServiceTransactionAggregationKey = sync.Pool{ + New: func() interface{} { + return &ServiceTransactionAggregationKey{} + }, +} + +func (m *ServiceTransactionAggregationKey) ResetVT() { + m.Reset() +} +func (m *ServiceTransactionAggregationKey) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_ServiceTransactionAggregationKey.Put(m) + } +} +func ServiceTransactionAggregationKeyFromVTPool() *ServiceTransactionAggregationKey { + return vtprotoPool_ServiceTransactionAggregationKey.Get().(*ServiceTransactionAggregationKey) +} + +var vtprotoPool_ServiceTransactionMetrics = sync.Pool{ + New: func() interface{} { + return &ServiceTransactionMetrics{} + }, +} + +func (m *ServiceTransactionMetrics) ResetVT() { + m.Histogram.ReturnToVTPool() + m.Reset() +} +func (m *ServiceTransactionMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_ServiceTransactionMetrics.Put(m) + } +} +func ServiceTransactionMetricsFromVTPool() *ServiceTransactionMetrics { + return vtprotoPool_ServiceTransactionMetrics.Get().(*ServiceTransactionMetrics) +} + +var vtprotoPool_KeyedSpanMetrics = sync.Pool{ + New: func() interface{} { + return &KeyedSpanMetrics{} + }, +} + +func (m *KeyedSpanMetrics) ResetVT() { + m.Key.ReturnToVTPool() + m.Metrics.ReturnToVTPool() + m.Reset() +} +func (m *KeyedSpanMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_KeyedSpanMetrics.Put(m) + } +} +func KeyedSpanMetricsFromVTPool() *KeyedSpanMetrics { + return vtprotoPool_KeyedSpanMetrics.Get().(*KeyedSpanMetrics) +} + +var vtprotoPool_SpanAggregationKey = sync.Pool{ + New: func() interface{} { + return 
&SpanAggregationKey{} + }, +} + +func (m *SpanAggregationKey) ResetVT() { + m.Reset() +} +func (m *SpanAggregationKey) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_SpanAggregationKey.Put(m) + } +} +func SpanAggregationKeyFromVTPool() *SpanAggregationKey { + return vtprotoPool_SpanAggregationKey.Get().(*SpanAggregationKey) +} + +var vtprotoPool_SpanMetrics = sync.Pool{ + New: func() interface{} { + return &SpanMetrics{} + }, +} + +func (m *SpanMetrics) ResetVT() { + m.Reset() +} +func (m *SpanMetrics) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_SpanMetrics.Put(m) + } +} +func SpanMetricsFromVTPool() *SpanMetrics { + return vtprotoPool_SpanMetrics.Get().(*SpanMetrics) +} + +var vtprotoPool_Overflow = sync.Pool{ + New: func() interface{} { + return &Overflow{} + }, +} + +func (m *Overflow) ResetVT() { + m.OverflowTransactions.ReturnToVTPool() + m.OverflowServiceTransactions.ReturnToVTPool() + m.OverflowSpans.ReturnToVTPool() + f0 := m.OverflowTransactionsEstimator[:0] + f1 := m.OverflowServiceTransactionsEstimator[:0] + f2 := m.OverflowSpansEstimator[:0] + m.Reset() + m.OverflowTransactionsEstimator = f0 + m.OverflowServiceTransactionsEstimator = f1 + m.OverflowSpansEstimator = f2 +} +func (m *Overflow) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_Overflow.Put(m) + } +} +func OverflowFromVTPool() *Overflow { + return vtprotoPool_Overflow.Get().(*Overflow) +} + +var vtprotoPool_HDRHistogram = sync.Pool{ + New: func() interface{} { + return &HDRHistogram{} + }, +} + +func (m *HDRHistogram) ResetVT() { + f0 := m.Counts[:0] + f1 := m.Buckets[:0] + m.Reset() + m.Counts = f0 + m.Buckets = f1 +} +func (m *HDRHistogram) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_HDRHistogram.Put(m) + } +} +func HDRHistogramFromVTPool() *HDRHistogram { + return vtprotoPool_HDRHistogram.Get().(*HDRHistogram) +} +func (m *CombinedMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ServiceMetrics) > 0 { + for _, e := range m.ServiceMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.OverflowServices != nil { + l = m.OverflowServices.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.OverflowServicesEstimator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.EventsTotal != 0 { + n += 9 + } + if m.YoungestEventTimestamp != 0 { + n += 1 + sov(uint64(m.YoungestEventTimestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyedServiceMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Metrics != nil { + l = m.Metrics.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ServiceAggregationKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sov(uint64(m.Timestamp)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceEnvironment) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceLanguageName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AgentName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.GlobalLabelsStr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ServiceMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OverflowGroups != nil { + l = m.OverflowGroups.SizeVT() + n += 1 + l + sov(uint64(l)) + 
} + if len(m.TransactionMetrics) > 0 { + for _, e := range m.TransactionMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ServiceTransactionMetrics) > 0 { + for _, e := range m.ServiceTransactionMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.SpanMetrics) > 0 { + for _, e := range m.SpanMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *KeyedTransactionMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Metrics != nil { + l = m.Metrics.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TransactionAggregationKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceRoot { + n += 2 + } + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.KubernetesPodName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceNodeName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceRuntimeName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceRuntimeVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ServiceLanguageVersion) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.HostHostname) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.HostName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.HostOsPlatform) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.EventOutcome) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TransactionName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TransactionType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TransactionResult) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.FaasColdstart != 0 { + n += 2 + sov(uint64(m.FaasColdstart)) + } + l = len(m.FaasId) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.FaasName) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.FaasVersion) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.FaasTriggerType) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudProvider) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudRegion) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudAvailabilityZone) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudServiceName) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudAccountId) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudAccountName) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudMachineType) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudProjectId) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.CloudProjectName) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TransactionMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Histogram != nil { + l = m.Histogram.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *KeyedServiceTransactionMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Metrics != nil { + l = m.Metrics.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n 
+= len(m.unknownFields) + return n +} + +func (m *ServiceTransactionAggregationKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TransactionType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ServiceTransactionMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Histogram != nil { + l = m.Histogram.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.FailureCount != 0 { + n += 9 + } + if m.SuccessCount != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *KeyedSpanMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Key != nil { + l = m.Key.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Metrics != nil { + l = m.Metrics.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SpanAggregationKey) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SpanName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Outcome) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SpanMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Overflow) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OverflowTransactions != nil { + l = m.OverflowTransactions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.OverflowServiceTransactions != nil { + l = m.OverflowServiceTransactions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.OverflowSpans != nil { + l = m.OverflowSpans.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.OverflowTransactionsEstimator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.OverflowServiceTransactionsEstimator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.OverflowSpansEstimator) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HDRHistogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LowestTrackableValue != 0 { + n += 1 + sov(uint64(m.LowestTrackableValue)) + } + if m.HighestTrackableValue != 0 { + n += 1 + sov(uint64(m.HighestTrackableValue)) + } + if m.SignificantFigures != 0 { + n += 1 + sov(uint64(m.SignificantFigures)) + } + if len(m.Counts) > 0 { + l = 0 + for _, e := range m.Counts { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if len(m.Buckets) > 0 { + l = 0 + for _, e := range m.Buckets { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CombinedMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: CombinedMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CombinedMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.ServiceMetrics) == cap(m.ServiceMetrics) { + m.ServiceMetrics = append(m.ServiceMetrics, &KeyedServiceMetrics{}) + } else { + m.ServiceMetrics = m.ServiceMetrics[:len(m.ServiceMetrics)+1] + if m.ServiceMetrics[len(m.ServiceMetrics)-1] == nil { + m.ServiceMetrics[len(m.ServiceMetrics)-1] = &KeyedServiceMetrics{} + } + } + if err := m.ServiceMetrics[len(m.ServiceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowServices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OverflowServices == nil { + m.OverflowServices = OverflowFromVTPool() + } + if err := m.OverflowServices.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowServicesEstimator", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OverflowServicesEstimator = append(m.OverflowServicesEstimator[:0], dAtA[iNdEx:postIndex]...) 
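For readers skimming this generated hunk: every UnmarshalVT loop in the file begins by decoding a varint key whose low three bits carry the wire type and whose upper bits carry the field number, which is exactly what the `wire >> 3` / `wire & 0x7` split above computes. A minimal standalone sketch of that decode step; the helper name is ours, not vtproto's:

package main

import "fmt"

// decodeVarint mirrors the shift/accumulate loops emitted throughout
// this file: seven payload bits per byte, high bit set on continuation.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, fmt.Errorf("varint longer than 64 bits")
}

func main() {
	// 0x2a is one of the keys written above: field 5, wire type 2
	// (length-delimited), e.g. HDRHistogram.Buckets.
	wire, n, _ := decodeVarint([]byte{0x2a})
	fmt.Println(wire>>3, wire&0x7, n) // prints: 5 2 1
}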
+ if m.OverflowServicesEstimator == nil { + m.OverflowServicesEstimator = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EventsTotal", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.EventsTotal = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field YoungestEventTimestamp", wireType) + } + m.YoungestEventTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.YoungestEventTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyedServiceMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyedServiceMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyedServiceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Key == nil { + m.Key = ServiceAggregationKeyFromVTPool() + } + if err := m.Key.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = ServiceMetricsFromVTPool() + } + if err := m.Metrics.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAggregationKey) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAggregationKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAggregationKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceEnvironment", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceEnvironment = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceLanguageName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceLanguageName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalLabelsStr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobalLabelsStr = append(m.GlobalLabelsStr[:0], dAtA[iNdEx:postIndex]...) + if m.GlobalLabelsStr == nil { + m.GlobalLabelsStr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OverflowGroups == nil { + m.OverflowGroups = OverflowFromVTPool() + } + if err := m.OverflowGroups.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
len(m.TransactionMetrics) == cap(m.TransactionMetrics) { + m.TransactionMetrics = append(m.TransactionMetrics, &KeyedTransactionMetrics{}) + } else { + m.TransactionMetrics = m.TransactionMetrics[:len(m.TransactionMetrics)+1] + if m.TransactionMetrics[len(m.TransactionMetrics)-1] == nil { + m.TransactionMetrics[len(m.TransactionMetrics)-1] = &KeyedTransactionMetrics{} + } + } + if err := m.TransactionMetrics[len(m.TransactionMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceTransactionMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.ServiceTransactionMetrics) == cap(m.ServiceTransactionMetrics) { + m.ServiceTransactionMetrics = append(m.ServiceTransactionMetrics, &KeyedServiceTransactionMetrics{}) + } else { + m.ServiceTransactionMetrics = m.ServiceTransactionMetrics[:len(m.ServiceTransactionMetrics)+1] + if m.ServiceTransactionMetrics[len(m.ServiceTransactionMetrics)-1] == nil { + m.ServiceTransactionMetrics[len(m.ServiceTransactionMetrics)-1] = &KeyedServiceTransactionMetrics{} + } + } + if err := m.ServiceTransactionMetrics[len(m.ServiceTransactionMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.SpanMetrics) == cap(m.SpanMetrics) { + m.SpanMetrics = append(m.SpanMetrics, &KeyedSpanMetrics{}) + } else { + m.SpanMetrics = m.SpanMetrics[:len(m.SpanMetrics)+1] + if m.SpanMetrics[len(m.SpanMetrics)-1] == nil { + m.SpanMetrics[len(m.SpanMetrics)-1] = &KeyedSpanMetrics{} + } + } + if err := m.SpanMetrics[len(m.SpanMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
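The `len == cap` branches above are the allocation-avoidance idiom vtproto uses for repeated message fields: ResetVT truncates a slice to length zero while keeping its capacity and old elements, so the next UnmarshalVT can revive those elements in place instead of allocating. A generic sketch of the same idiom; the helper is hypothetical, written here only to isolate the pattern:

// appendReusable grows s by one element, reviving a previously pooled
// element when spare capacity exists, as the generated code does for
// TransactionMetrics, ServiceTransactionMetrics, and SpanMetrics.
func appendReusable[T any](s []*T) []*T {
	if len(s) == cap(s) {
		return append(s, new(T)) // no spare capacity: allocate fresh
	}
	s = s[:len(s)+1] // spare capacity: reuse the slot left by ResetVT
	if s[len(s)-1] == nil {
		s[len(s)-1] = new(T)
	}
	return s
}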
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyedTransactionMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyedTransactionMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyedTransactionMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Key == nil { + m.Key = TransactionAggregationKeyFromVTPool() + } + if err := m.Key.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = TransactionMetricsFromVTPool() + } + if err := m.Metrics.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
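The pools declared earlier in this hunk (the vtprotoPool_* variables with their FromVTPool, ResetVT, and ReturnToVTPool helpers) all share one lifecycle, and it is precisely the call pattern this patch's subject says is being removed from the aggregators package. A hedged sketch of how a caller would use them; `decodeHistogram` and `payload` are ours, not part of the generated API:

// decodeHistogram leases a cleared *HDRHistogram from the pool, fills
// it from payload, and leaves returning it to the caller. ResetVT keeps
// the Counts and Buckets backing arrays, so a recycled message decodes
// without reallocating them.
func decodeHistogram(payload []byte) (*HDRHistogram, error) {
	h := HDRHistogramFromVTPool()
	if err := h.UnmarshalVT(payload); err != nil {
		h.ReturnToVTPool() // reset and hand back on failure
		return nil, err
	}
	return h, nil // caller must call h.ReturnToVTPool() when done
}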
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransactionAggregationKey) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransactionAggregationKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransactionAggregationKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TraceRoot = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubernetesPodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubernetesPodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceNodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceNodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceRuntimeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceRuntimeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceRuntimeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceLanguageVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceLanguageVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostOsPlatform", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostOsPlatform = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventOutcome", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EventOutcome = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TransactionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TransactionType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionResult", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF 
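All the MarshalToSizedBufferVT functions in this file fill their buffer back to front: SizeVT computes the exact size up front, and writing tail-first lets each length prefix be emitted after its payload in a single pass. sov() is the varint width both sides agree on, rounding the bit length up to whole 7-bit groups. A self-contained sketch of one string field; the field number and value are arbitrary:

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes a varint occupies, as in the generated file.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	val := []byte("frontend")                    // payload of a string field
	size := 1 + sov(uint64(len(val))) + len(val) // key + length + bytes
	buf := make([]byte, size)
	i := len(buf)

	// Back to front, exactly like MarshalToSizedBufferVT.
	i -= len(val)
	copy(buf[i:], val)
	i--
	buf[i] = byte(len(val)) // one-byte varint, since len(val) < 0x80
	i--
	buf[i] = 0xa // field 1, wire type 2 (length-delimited)

	fmt.Printf("% x\n", buf) // prints: 0a 08 66 72 6f 6e 74 65 6e 64
}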
+ } + m.TransactionResult = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FaasColdstart", wireType) + } + m.FaasColdstart = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FaasColdstart |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaasId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FaasId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaasName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FaasName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaasVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FaasVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FaasTriggerType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FaasTriggerType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudProvider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudProvider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudRegion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudRegion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudAvailabilityZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudAvailabilityZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudAccountId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudAccountId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudMachineType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudMachineType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudProjectId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudProjectId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloudProjectName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloudProjectName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
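+ // Any field number without a dedicated case lands here: skip() measures the
+ // unknown field (skippy is its byte length), the raw bytes are appended to
+ // m.unknownFields so they survive a re-marshal, and the cursor advances.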
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransactionMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransactionMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransactionMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = HDRHistogramFromVTPool() + } + if err := m.Histogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
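+ // Length-delimited (wire type 2) submessages decode recursively: the varint
+ // msglen bounds a sub-slice of dAtA, and a nil Histogram is first obtained
+ // from a sync.Pool via HDRHistogramFromVTPool before UnmarshalVT runs on it.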
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyedServiceTransactionMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyedServiceTransactionMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyedServiceTransactionMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Key == nil { + m.Key = ServiceTransactionAggregationKeyFromVTPool() + } + if err := m.Key.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = ServiceTransactionMetricsFromVTPool() + } + if err := m.Metrics.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
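+ // Varints accumulate 7 bits per byte: each byte contributes b&0x7F shifted
+ // into place, the 0x80 bit signals continuation, and shift >= 64 rejects
+ // values that would overflow a uint64.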
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceTransactionAggregationKey) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceTransactionAggregationKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceTransactionAggregationKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TransactionType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
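+ // String fields are materialized with string(dAtA[iNdEx:postIndex]), which
+ // copies the bytes, so the decoded key does not alias the input buffer.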
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceTransactionMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceTransactionMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceTransactionMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = HDRHistogramFromVTPool() + } + if err := m.Histogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureCount", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.FailureCount = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessCount", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SuccessCount = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
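+ // Wire type 1 (fixed64) fields such as FailureCount and SuccessCount occupy
+ // exactly eight little-endian bytes, reinterpreted as an IEEE-754 float64
+ // through math.Float64frombits rather than scanned as varints.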
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyedSpanMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyedSpanMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyedSpanMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Key == nil { + m.Key = SpanAggregationKeyFromVTPool() + } + if err := m.Key.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metrics == nil { + m.Metrics = SpanMetricsFromVTPool() + } + if err := m.Metrics.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
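+ // Every length-delimited read is bounds-checked twice: postIndex < 0 catches
+ // integer overflow of iNdEx+msglen, and postIndex > l catches a truncated
+ // buffer, returning io.ErrUnexpectedEOF instead of reading out of range.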
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SpanAggregationKey) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanAggregationKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanAggregationKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outcome", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Outcome = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SpanMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Overflow) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowTransactions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OverflowTransactions == nil { + m.OverflowTransactions = TransactionMetricsFromVTPool() + } + if err := m.OverflowTransactions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowServiceTransactions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OverflowServiceTransactions == nil { + m.OverflowServiceTransactions = ServiceTransactionMetricsFromVTPool() + } + if err := m.OverflowServiceTransactions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OverflowSpans == nil { + m.OverflowSpans = SpanMetricsFromVTPool() + } + if err := m.OverflowSpans.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowTransactionsEstimator", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + 
if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OverflowTransactionsEstimator = append(m.OverflowTransactionsEstimator[:0], dAtA[iNdEx:postIndex]...) + if m.OverflowTransactionsEstimator == nil { + m.OverflowTransactionsEstimator = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowServiceTransactionsEstimator", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OverflowServiceTransactionsEstimator = append(m.OverflowServiceTransactionsEstimator[:0], dAtA[iNdEx:postIndex]...) + if m.OverflowServiceTransactionsEstimator == nil { + m.OverflowServiceTransactionsEstimator = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OverflowSpansEstimator", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OverflowSpansEstimator = append(m.OverflowSpansEstimator[:0], dAtA[iNdEx:postIndex]...) + if m.OverflowSpansEstimator == nil { + m.OverflowSpansEstimator = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
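+ // The estimator bytes fields are copied by appending the payload to a
+ // zero-length reslice of the existing field, reusing its capacity; a
+ // present-but-empty field is normalized to a non-nil empty slice.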
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HDRHistogram) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HDRHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HDRHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LowestTrackableValue", wireType) + } + m.LowestTrackableValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LowestTrackableValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HighestTrackableValue", wireType) + } + m.HighestTrackableValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HighestTrackableValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignificantFigures", wireType) + } + m.SignificantFigures = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SignificantFigures |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Counts = append(m.Counts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Counts) == 0 && cap(m.Counts) < elementCount { + m.Counts = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Counts = append(m.Counts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Counts", wireType) + } + case 5: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Buckets = append(m.Buckets, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Buckets) == 0 && cap(m.Buckets) < elementCount { + m.Buckets = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Buckets = append(m.Buckets, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/copy/apm-aggregation/aggregationpb/doc.go b/copy/apm-aggregation/aggregationpb/doc.go new file mode 100644 index 00000000000..bd6545fe8b7 --- /dev/null +++ b/copy/apm-aggregation/aggregationpb/doc.go @@ -0,0 +1,7 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Package aggregationpb holds all the generated code from protobuf definitions +// held in the `proto` folder. +package aggregationpb diff --git a/copy/apm-aggregation/aggregationpb/labels.pb.go b/copy/apm-aggregation/aggregationpb/labels.pb.go new file mode 100644 index 00000000000..8ce1c1e6d8d --- /dev/null +++ b/copy/apm-aggregation/aggregationpb/labels.pb.go @@ -0,0 +1,325 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v4.22.1 +// source: proto/labels.proto + +package aggregationpb + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GlobalLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + NumericLabels []*NumericLabel `protobuf:"bytes,2,rep,name=numeric_labels,json=numericLabels,proto3" json:"numeric_labels,omitempty"` +} + +func (x *GlobalLabels) Reset() { + *x = GlobalLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_labels_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GlobalLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GlobalLabels) ProtoMessage() {} + +func (x *GlobalLabels) ProtoReflect() protoreflect.Message { + mi := &file_proto_labels_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GlobalLabels.ProtoReflect.Descriptor instead. 
+func (*GlobalLabels) Descriptor() ([]byte, []int) { + return file_proto_labels_proto_rawDescGZIP(), []int{0} +} + +func (x *GlobalLabels) GetLabels() []*Label { + if x != nil { + return x.Labels + } + return nil +} + +func (x *GlobalLabels) GetNumericLabels() []*NumericLabel { + if x != nil { + return x.NumericLabels + } + return nil +} + +type Label struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *Label) Reset() { + *x = Label{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_labels_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Label) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Label) ProtoMessage() {} + +func (x *Label) ProtoReflect() protoreflect.Message { + mi := &file_proto_labels_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Label.ProtoReflect.Descriptor instead. +func (*Label) Descriptor() ([]byte, []int) { + return file_proto_labels_proto_rawDescGZIP(), []int{1} +} + +func (x *Label) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Label) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *Label) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +type NumericLabel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + Values []float64 `protobuf:"fixed64,3,rep,packed,name=values,proto3" json:"values,omitempty"` +} + +func (x *NumericLabel) Reset() { + *x = NumericLabel{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_labels_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NumericLabel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NumericLabel) ProtoMessage() {} + +func (x *NumericLabel) ProtoReflect() protoreflect.Message { + mi := &file_proto_labels_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NumericLabel.ProtoReflect.Descriptor instead. 
+func (*NumericLabel) Descriptor() ([]byte, []int) { + return file_proto_labels_proto_rawDescGZIP(), []int{2} +} + +func (x *NumericLabel) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *NumericLabel) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *NumericLabel) GetValues() []float64 { + if x != nil { + return x.Values + } + return nil +} + +var File_proto_labels_proto protoreflect.FileDescriptor + +var file_proto_labels_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, + 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x82, 0x01, 0x0a, 0x0c, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x2d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, + 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, + 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x61, 0x70, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, + 0x6d, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x65, + 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x47, 0x0a, 0x05, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x22, 0x4e, 0x0a, 0x0c, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x42, 0x13, 0x48, 0x01, 0x5a, 0x0f, 0x2e, 0x2f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_labels_proto_rawDescOnce sync.Once + file_proto_labels_proto_rawDescData = file_proto_labels_proto_rawDesc +) + +func file_proto_labels_proto_rawDescGZIP() []byte { + file_proto_labels_proto_rawDescOnce.Do(func() { + file_proto_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_labels_proto_rawDescData) + }) + return file_proto_labels_proto_rawDescData +} + +var file_proto_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_proto_labels_proto_goTypes = []interface{}{ + (*GlobalLabels)(nil), // 0: elastic.apm.v1.GlobalLabels + (*Label)(nil), // 1: elastic.apm.v1.Label + (*NumericLabel)(nil), // 2: elastic.apm.v1.NumericLabel +} +var file_proto_labels_proto_depIdxs = []int32{ + 1, // 0: elastic.apm.v1.GlobalLabels.labels:type_name -> elastic.apm.v1.Label + 2, // 1: elastic.apm.v1.GlobalLabels.numeric_labels:type_name -> 
elastic.apm.v1.NumericLabel + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_proto_labels_proto_init() } +func file_proto_labels_proto_init() { + if File_proto_labels_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_labels_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GlobalLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_labels_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Label); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_labels_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NumericLabel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_labels_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_labels_proto_goTypes, + DependencyIndexes: file_proto_labels_proto_depIdxs, + MessageInfos: file_proto_labels_proto_msgTypes, + }.Build() + File_proto_labels_proto = out.File + file_proto_labels_proto_rawDesc = nil + file_proto_labels_proto_goTypes = nil + file_proto_labels_proto_depIdxs = nil +} diff --git a/copy/apm-aggregation/aggregationpb/labels_vtproto.pb.go b/copy/apm-aggregation/aggregationpb/labels_vtproto.pb.go new file mode 100644 index 00000000000..946242ca1f2 --- /dev/null +++ b/copy/apm-aggregation/aggregationpb/labels_vtproto.pb.go @@ -0,0 +1,839 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.5.0 +// source: proto/labels.proto + +package aggregationpb + +import ( + binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + sync "sync" + + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GlobalLabels) CloneVT() *GlobalLabels { + if m == nil { + return (*GlobalLabels)(nil) + } + r := &GlobalLabels{} + if rhs := m.Labels; rhs != nil { + tmpContainer := make([]*Label, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Labels = tmpContainer + } + if rhs := m.NumericLabels; rhs != nil { + tmpContainer := make([]*NumericLabel, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.NumericLabels = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GlobalLabels) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Label) CloneVT() *Label { + if m == nil { + return (*Label)(nil) + } + r := &Label{ + Key: m.Key, + Value: m.Value, + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Values = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Label) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *NumericLabel) CloneVT() *NumericLabel { + if m == nil { + return (*NumericLabel)(nil) + } + r := &NumericLabel{ + Key: m.Key, + Value: m.Value, + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]float64, len(rhs)) + copy(tmpContainer, rhs) + r.Values = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *NumericLabel) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GlobalLabels) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlobalLabels) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GlobalLabels) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.NumericLabels) > 0 { + for iNdEx := len(m.NumericLabels) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NumericLabels[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Labels[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = 
i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NumericLabel) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NumericLabel) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *NumericLabel) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.Values[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarint(dAtA, i, uint64(len(m.Values)*8)) + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +var vtprotoPool_GlobalLabels = sync.Pool{ + New: func() interface{} { + return &GlobalLabels{} + }, +} + +func (m *GlobalLabels) ResetVT() { + for _, mm := range m.Labels { + mm.ResetVT() + } + f0 := m.Labels[:0] + for _, mm := range m.NumericLabels { + mm.ResetVT() + } + f1 := m.NumericLabels[:0] + m.Reset() + m.Labels = f0 + m.NumericLabels = f1 +} +func (m *GlobalLabels) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_GlobalLabels.Put(m) + } +} +func GlobalLabelsFromVTPool() *GlobalLabels { + return vtprotoPool_GlobalLabels.Get().(*GlobalLabels) +} + +var vtprotoPool_Label = sync.Pool{ + New: func() interface{} { + return &Label{} + }, +} + +func (m *Label) ResetVT() { + f0 := m.Values[:0] + m.Reset() + m.Values = f0 +} +func (m *Label) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_Label.Put(m) + } +} +func LabelFromVTPool() *Label { + return vtprotoPool_Label.Get().(*Label) +} + +var vtprotoPool_NumericLabel = sync.Pool{ + New: func() interface{} { + return &NumericLabel{} + }, +} + +func (m *NumericLabel) ResetVT() { + f0 := m.Values[:0] + m.Reset() + m.Values = f0 +} +func (m *NumericLabel) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_NumericLabel.Put(m) + } +} +func NumericLabelFromVTPool() *NumericLabel { + return vtprotoPool_NumericLabel.Get().(*NumericLabel) +} +func (m *GlobalLabels) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.NumericLabels) > 0 { + for _, e := range m.NumericLabels { + l = e.SizeVT() + 
n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Label) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *NumericLabel) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Value != 0 { + n += 9 + } + if len(m.Values) > 0 { + n += 1 + sov(uint64(len(m.Values)*8)) + len(m.Values)*8 + } + n += len(m.unknownFields) + return n +} + +func (m *GlobalLabels) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlobalLabels: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlobalLabels: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Labels) == cap(m.Labels) { + m.Labels = append(m.Labels, &Label{}) + } else { + m.Labels = m.Labels[:len(m.Labels)+1] + if m.Labels[len(m.Labels)-1] == nil { + m.Labels[len(m.Labels)-1] = &Label{} + } + } + if err := m.Labels[len(m.Labels)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NumericLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.NumericLabels) == cap(m.NumericLabels) { + m.NumericLabels = append(m.NumericLabels, &NumericLabel{}) + } else { + m.NumericLabels = m.NumericLabels[:len(m.NumericLabels)+1] + if m.NumericLabels[len(m.NumericLabels)-1] == nil { + m.NumericLabels[len(m.NumericLabels)-1] = &NumericLabel{} + } + } + if err := m.NumericLabels[len(m.NumericLabels)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
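+			// Unknown fields are preserved verbatim; MarshalToSizedBufferVT
+			// copies m.unknownFields back out, so fields written by a newer
+			// schema survive a decode/encode round trip.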
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NumericLabel) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NumericLabel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NumericLabel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.Values) == 0 && cap(m.Values) < elementCount { + m.Values = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/copy/apm-aggregation/aggregators/aggregator.go b/copy/apm-aggregation/aggregators/aggregator.go
new file mode 100644
index 00000000000..2b384bdb4c8
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/aggregator.go
@@ -0,0 +1,777 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+// Package aggregators holds the logic for doing the actual aggregation.
+package aggregators
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/cockroachdb/pebble"
+	"github.com/cockroachdb/pebble/vfs"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"github.com/elastic/apm-aggregation/aggregationpb"
+	"github.com/elastic/apm-aggregation/aggregators/internal/telemetry"
+	"github.com/elastic/apm-data/model/modelpb"
+)
+
+const (
+	dbCommitThresholdBytes = 10 * 1024 * 1024 // commit every 10MB
+	aggregationIvlKey      = "aggregation_interval"
+	aggregationTypeKey     = "aggregation_type"
+)
+
+var (
+	// ErrAggregatorClosed means that the aggregator was already closed when
+	// the method was called, so the call cannot be processed.
+	ErrAggregatorClosed = errors.New("aggregator is closed")
+)
+
+// Aggregator represents an LSM-based aggregator instance used to generate
+// aggregated metrics. The metrics aggregated by the aggregator are
+// harvested based on the aggregation interval and processed by the
+// configured processor. The aggregated metrics are timestamped based
+// on when the aggregator was created and on the harvest loop. All the
+// events collected between the calls to New and Run fall into the
+// same processing time bucket; thereafter the processing time
+// bucket is advanced in multiples of the aggregation interval.
+type Aggregator struct {
+	db           *pebble.DB
+	writeOptions *pebble.WriteOptions
+	cfg          config
+
+	mu             sync.Mutex
+	processingTime time.Time
+	batch          *pebble.Batch
+	cachedEvents   cachedEventsMap
+
+	closed     chan struct{}
+	runStopped chan struct{}
+
+	metrics *telemetry.Metrics
+}
+
+// New returns a new aggregator instance.
+//
+// Close must be called when the aggregator is no longer needed.
+func New(opts ...Option) (*Aggregator, error) {
+	cfg, err := newConfig(opts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create aggregation config: %w", err)
+	}
+
+	pebbleOpts := &pebble.Options{
+		Merger: &pebble.Merger{
+			Name: "combined_metrics_merger",
+			Merge: func(_, value []byte) (pebble.ValueMerger, error) {
+				merger := combinedMetricsMerger{
+					limits:      cfg.Limits,
+					constraints: newConstraints(cfg.Limits),
+				}
+				pb := aggregationpb.CombinedMetrics{}
+				if err := pb.UnmarshalVT(value); err != nil {
+					return nil, fmt.Errorf("failed to unmarshal metrics: %w", err)
+				}
+				merger.merge(&pb)
+				return &merger, nil
+			},
+		},
+	}
+	writeOptions := pebble.Sync
+	if cfg.InMemory {
+		pebbleOpts.FS = vfs.NewMem()
+		pebbleOpts.DisableWAL = true
+		writeOptions = pebble.NoSync
+	}
+	pb, err := pebble.Open(cfg.DataDir, pebbleOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create pebble db: %w", err)
+	}
+
+	metrics, err := telemetry.NewMetrics(
+		func() *pebble.Metrics { return pb.Metrics() },
+		telemetry.WithMeter(cfg.Meter),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create metrics: %w", err)
+	}
+
+	return &Aggregator{
+		db:             pb,
+		writeOptions:   writeOptions,
+		cfg:            cfg,
+		processingTime: time.Now().Truncate(cfg.AggregationIntervals[0]),
+		closed:         make(chan struct{}),
+		metrics:        metrics,
+	}, nil
+}
+
+// AggregateBatch aggregates all events in the batch. This function will return
+// an error if the aggregator's Run loop has errored or has been explicitly stopped.
+// However, it doesn't require the aggregator to be running to perform aggregation.
+func (a *Aggregator) AggregateBatch(
+	ctx context.Context,
+	id [16]byte,
+	b *modelpb.Batch,
+) error {
+	cmIDAttrs := a.cfg.CombinedMetricsIDToKVs(id)
+
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-a.closed:
+		return ErrAggregatorClosed
+	default:
+	}
+
+	var (
+		errs                    []error
+		successBytes, failBytes int64
+	)
+	cmk := CombinedMetricsKey{ID: id}
+	for _, ivl := range a.cfg.AggregationIntervals {
+		cmk.ProcessingTime = a.processingTime.Truncate(ivl)
+		cmk.Interval = ivl
+		for _, e := range *b {
+			bytesIn, err := a.aggregateAPMEvent(ctx, cmk, e)
+			if err != nil {
+				errs = append(errs, err)
+				failBytes += int64(bytesIn)
+			} else {
+				successBytes += int64(bytesIn)
+			}
+		}
+		a.cachedEvents.add(ivl, id, float64(len(*b)))
+	}
+
+	var err error
+	if len(errs) > 0 {
+		a.metrics.BytesProcessed.Add(context.Background(), failBytes, metric.WithAttributeSet(
+			attribute.NewSet(append(cmIDAttrs, telemetry.WithFailure())...),
+		))
+		err = fmt.Errorf("failed batch aggregation:\n%w", errors.Join(errs...))
+	}
+	a.metrics.BytesProcessed.Add(context.Background(), successBytes, metric.WithAttributeSet(
+		attribute.NewSet(append(cmIDAttrs, telemetry.WithSuccess())...),
+	))
+	return err
+}
+
+// AggregateCombinedMetrics aggregates partial metrics into a bigger aggregate.
+// This function will return an error if the aggregator's Run loop has errored
+// or has been explicitly stopped. However, it doesn't require the aggregator
+// to be running to perform aggregation.
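+//
+// The partial metrics are typically produced by another aggregator instance,
+// for example one running on a different APM Server, and are merged into the
+// local pebble store under the same CombinedMetricsKey by the merge operator.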
+func (a *Aggregator) AggregateCombinedMetrics(
+	ctx context.Context,
+	cmk CombinedMetricsKey,
+	cm *aggregationpb.CombinedMetrics,
+) error {
+	cmIDAttrs := a.cfg.CombinedMetricsIDToKVs(cmk.ID)
+	traceAttrs := append(cmIDAttrs,
+		attribute.String(aggregationIvlKey, formatDuration(cmk.Interval)),
+		attribute.String("processing_time", cmk.ProcessingTime.String()),
+	)
+	ctx, span := a.cfg.Tracer.Start(ctx, "AggregateCombinedMetrics", trace.WithAttributes(traceAttrs...))
+	defer span.End()
+
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-a.closed:
+		return ErrAggregatorClosed
+	default:
+	}
+
+	if cmk.ProcessingTime.Before(a.processingTime.Add(-a.cfg.Lookback)) {
+		a.metrics.EventsProcessed.Add(
+			context.Background(), cm.EventsTotal,
+			metric.WithAttributeSet(attribute.NewSet(
+				append(a.cfg.CombinedMetricsIDToKVs(cmk.ID),
+					attribute.String(aggregationIvlKey, formatDuration(cmk.Interval)),
+					telemetry.WithFailure(),
+				)...,
+			)),
+		)
+		a.cfg.Logger.Warn(
+			"received expired combined metrics, dropping silently",
+			zap.Time("received_processing_time", cmk.ProcessingTime),
+			zap.Time("current_processing_time", a.processingTime),
+		)
+		return nil
+	}
+
+	var attrSetOpt metric.MeasurementOption
+	bytesIn, err := a.aggregate(ctx, cmk, cm)
+	if err != nil {
+		attrSetOpt = metric.WithAttributeSet(
+			attribute.NewSet(append(cmIDAttrs, telemetry.WithFailure())...),
+		)
+	} else {
+		attrSetOpt = metric.WithAttributeSet(
+			attribute.NewSet(append(cmIDAttrs, telemetry.WithSuccess())...),
+		)
+	}
+
+	span.SetAttributes(attribute.Int("bytes_ingested", bytesIn))
+	a.cachedEvents.add(cmk.Interval, cmk.ID, cm.EventsTotal)
+	a.metrics.BytesProcessed.Add(context.Background(), int64(bytesIn), attrSetOpt)
+	return err
+}
+
+// Run harvests the aggregated results periodically. For an aggregator,
+// Run must be called at most once.
+// - Running more than once will return an error
+// - Running after the aggregator is stopped will return ErrAggregatorClosed.
+func (a *Aggregator) Run(ctx context.Context) error {
+	a.mu.Lock()
+	if a.runStopped != nil {
+		a.mu.Unlock()
+		return errors.New("aggregator is already running")
+	}
+	a.runStopped = make(chan struct{})
+	a.mu.Unlock()
+	defer close(a.runStopped)
+
+	to := a.processingTime.Add(a.cfg.AggregationIntervals[0])
+	timer := time.NewTimer(time.Until(to.Add(a.cfg.HarvestDelay)))
+	defer timer.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-a.closed:
+			return ErrAggregatorClosed
+		case <-timer.C:
+		}
+
+		a.mu.Lock()
+		batch := a.batch
+		a.batch = nil
+		a.processingTime = to
+		cachedEventsStats := a.cachedEvents.loadAndDelete(to)
+		a.mu.Unlock()
+
+		if err := a.commitAndHarvest(ctx, batch, to, cachedEventsStats); err != nil {
+			a.cfg.Logger.Warn("failed to commit and harvest metrics", zap.Error(err))
+		}
+		to = to.Add(a.cfg.AggregationIntervals[0])
+		timer.Reset(time.Until(to.Add(a.cfg.HarvestDelay)))
+	}
+}
+
+// Close commits and closes any buffered writes, stops any running harvester,
+// performs a final harvest, and closes the underlying database.
+//
+// No further writes may be performed after Close is called, and no further
+// harvests will be performed once Close returns.
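+//
+// A minimal lifecycle sketch (illustrative only; error handling elided, and
+// ctx, dir, id and batch are placeholders):
+//
+//	agg, _ := aggregators.New(aggregators.WithDataDir(dir))
+//	go func() { _ = agg.Run(ctx) }()
+//	_ = agg.AggregateBatch(ctx, id, &batch)
+//	_ = agg.Close(ctx)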
+func (a *Aggregator) Close(ctx context.Context) error { + ctx, span := a.cfg.Tracer.Start(ctx, "Aggregator.Close") + defer span.End() + + a.mu.Lock() + defer a.mu.Unlock() + + select { + case <-a.closed: + default: + a.cfg.Logger.Info("stopping aggregator") + close(a.closed) + } + if a.runStopped != nil { + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while waiting for run to complete: %w", ctx.Err()) + case <-a.runStopped: + } + } + + if a.db != nil { + a.cfg.Logger.Info("running final aggregation") + if a.batch != nil { + if err := a.batch.Commit(a.writeOptions); err != nil { + span.RecordError(err) + return fmt.Errorf("failed to commit batch: %w", err) + } + if err := a.batch.Close(); err != nil { + span.RecordError(err) + return fmt.Errorf("failed to close batch: %w", err) + } + a.batch = nil + } + var errs []error + for _, ivl := range a.cfg.AggregationIntervals { + // At any particular time there will be 1 harvest candidate for + // each aggregation interval. We will align the end time and + // process each of these. + // + // TODO (lahsivjar): It is possible to harvest the same + // time multiple times, not an issue but can be optimized. + to := a.processingTime.Truncate(ivl).Add(ivl) + if err := a.harvest(ctx, to, a.cachedEvents.loadAndDelete(to)); err != nil { + span.RecordError(err) + errs = append(errs, fmt.Errorf( + "failed to harvest metrics for interval %s: %w", formatDuration(ivl), err), + ) + } + } + if len(errs) > 0 { + return fmt.Errorf("failed while running final harvest: %w", errors.Join(errs...)) + } + if err := a.db.Close(); err != nil { + span.RecordError(err) + return fmt.Errorf("failed to close pebble: %w", err) + } + // All future operations are invalid after db is closed + a.db = nil + } + if err := a.metrics.CleanUp(); err != nil { + span.RecordError(err) + return fmt.Errorf("failed to cleanup instrumentation: %w", err) + } + return nil +} + +func (a *Aggregator) aggregateAPMEvent( + ctx context.Context, + cmk CombinedMetricsKey, + e *modelpb.APMEvent, +) (int, error) { + var totalBytesIn int + aggregateFunc := func(k CombinedMetricsKey, m *aggregationpb.CombinedMetrics) error { + bytesIn, err := a.aggregate(ctx, k, m) + totalBytesIn += bytesIn + return err + } + err := eventToCombinedMetrics(e, cmk, a.cfg.Partitions, aggregateFunc) + if err != nil { + return 0, fmt.Errorf("failed to aggregate combined metrics: %w", err) + } + return totalBytesIn, nil +} + +// aggregate aggregates combined metrics for a given key and returns +// number of bytes ingested along with the error, if any. +func (a *Aggregator) aggregate( + ctx context.Context, + cmk CombinedMetricsKey, + cm *aggregationpb.CombinedMetrics, +) (int, error) { + if a.batch == nil { + // Batch is backed by a sync pool. After each commit we will release the batch + // back to the pool by calling Batch#Close and subsequently acquire a new batch. 
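+		// The fresh batch accumulates deferred merge operations and is
+		// committed and closed once it grows beyond dbCommitThresholdBytes
+		// (checked below) or when the harvest loop picks it up.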
+		a.batch = a.db.NewBatch()
+	}
+
+	op := a.batch.MergeDeferred(cmk.SizeBinary(), cm.SizeVT())
+	if err := cmk.MarshalBinaryToSizedBuffer(op.Key); err != nil {
+		return 0, fmt.Errorf("failed to marshal combined metrics key: %w", err)
+	}
+	if _, err := cm.MarshalToSizedBufferVT(op.Value); err != nil {
+		return 0, fmt.Errorf("failed to marshal combined metrics: %w", err)
+	}
+	if err := op.Finish(); err != nil {
+		return 0, fmt.Errorf("failed to finalize merge operation: %w", err)
+	}
+
+	bytesIn := cm.SizeVT()
+	if a.batch.Len() >= dbCommitThresholdBytes {
+		if err := a.batch.Commit(a.writeOptions); err != nil {
+			return bytesIn, fmt.Errorf("failed to commit pebble batch: %w", err)
+		}
+		if err := a.batch.Close(); err != nil {
+			return bytesIn, fmt.Errorf("failed to close pebble batch: %w", err)
+		}
+		a.batch = nil
+	}
+	return bytesIn, nil
+}
+
+func (a *Aggregator) commitAndHarvest(
+	ctx context.Context,
+	batch *pebble.Batch,
+	to time.Time,
+	cachedEventsStats map[time.Duration]map[[16]byte]float64,
+) error {
+	ctx, span := a.cfg.Tracer.Start(ctx, "commitAndHarvest")
+	defer span.End()
+
+	var errs []error
+	if batch != nil {
+		if err := batch.Commit(a.writeOptions); err != nil {
+			span.RecordError(err)
+			errs = append(errs, fmt.Errorf("failed to commit batch before harvest: %w", err))
+		}
+		if err := batch.Close(); err != nil {
+			span.RecordError(err)
+			errs = append(errs, fmt.Errorf("failed to close batch before harvest: %w", err))
+		}
+	}
+	if err := a.harvest(ctx, to, cachedEventsStats); err != nil {
+		span.RecordError(err)
+		errs = append(errs, fmt.Errorf("failed to harvest aggregated metrics: %w", err))
+	}
+	if len(errs) > 0 {
+		return errors.Join(errs...)
+	}
+	return nil
+}
+
+// harvest collects the mature metrics for all aggregation intervals and
+// deletes the entries in the db once the metrics are fully harvested. Harvest
+// takes an end time denoting the exclusive upper bound for harvesting.
+func (a *Aggregator) harvest(
+	ctx context.Context,
+	end time.Time,
+	cachedEventsStats map[time.Duration]map[[16]byte]float64,
+) error {
+	snap := a.db.NewSnapshot()
+	defer snap.Close()
+
+	var errs []error
+	for _, ivl := range a.cfg.AggregationIntervals {
+		// Check if the given aggregation interval needs to be harvested now
+		if end.Truncate(ivl).Equal(end) {
+			start := end.Add(-ivl).Add(-a.cfg.Lookback)
+			cmCount, err := a.harvestForInterval(
+				ctx, snap, start, end, ivl, cachedEventsStats[ivl],
+			)
+			if err != nil {
+				errs = append(errs, fmt.Errorf(
+					"failed to harvest aggregated metrics for interval %s: %w",
+					ivl, err,
+				))
+			}
+			a.cfg.Logger.Debug(
+				"Finished harvesting aggregated metrics",
+				zap.Int("combined_metrics_successfully_harvested", cmCount),
+				zap.Duration("aggregation_interval_ns", ivl),
+				zap.Time("harvested_till(exclusive)", end),
+				zap.Error(err),
+			)
+		}
+	}
+	return errors.Join(errs...)
+}
+
+// harvestForInterval harvests aggregated metrics for a given interval.
+// It returns the number of combined metrics successfully harvested and an
+// error; a non-nil error may be returned together with a non-zero count
+// when only some of the combined metrics fail to harvest.
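+//
+// Metrics are read from a snapshot via a point-key iterator bounded by
+// [start, end), and the same key range is deleted afterwards so that a
+// processing-time bucket is harvested at most once.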
+func (a *Aggregator) harvestForInterval( + ctx context.Context, + snap *pebble.Snapshot, + start, end time.Time, + ivl time.Duration, + cachedEventsStats map[[16]byte]float64, +) (int, error) { + from := CombinedMetricsKey{ + Interval: ivl, + ProcessingTime: start, + } + to := CombinedMetricsKey{ + Interval: ivl, + ProcessingTime: end, + } + lb := make([]byte, CombinedMetricsKeyEncodedSize) + ub := make([]byte, CombinedMetricsKeyEncodedSize) + from.MarshalBinaryToSizedBuffer(lb) + to.MarshalBinaryToSizedBuffer(ub) + + iter, err := snap.NewIter(&pebble.IterOptions{ + LowerBound: lb, + UpperBound: ub, + KeyTypes: pebble.IterKeyTypePointsOnly, + }) + if err != nil { + return 0, fmt.Errorf("failed to create iter: %w", err) + } + defer iter.Close() + + var errs []error + var cmCount int + ivlAttr := attribute.String(aggregationIvlKey, formatDuration(ivl)) + for iter.First(); iter.Valid(); iter.Next() { + var cmk CombinedMetricsKey + if err := cmk.UnmarshalBinary(iter.Key()); err != nil { + errs = append(errs, fmt.Errorf("failed to unmarshal key: %w", err)) + continue + } + harvestStats, err := a.processHarvest(ctx, cmk, iter.Value(), ivl) + if err != nil { + errs = append(errs, err) + continue + } + cmCount++ + + commonAttrsOpt := metric.WithAttributeSet(attribute.NewSet( + append(a.cfg.CombinedMetricsIDToKVs(cmk.ID), ivlAttr)..., + )) + + // Report the estimated number of overflowed metrics per aggregation interval. + // It is not meaningful to aggregate these across intervals or aggregators, + // as the overflowed aggregation keys may be overlapping sets. + recordMetricsOverflow := func(n uint64, aggregationType string) { + if n == 0 { + return + } + a.metrics.MetricsOverflowed.Add(context.Background(), int64(n), commonAttrsOpt, + metric.WithAttributeSet(attribute.NewSet( + attribute.String(aggregationTypeKey, aggregationType), + )), + ) + } + recordMetricsOverflow(harvestStats.servicesOverflowed, "service") + recordMetricsOverflow(harvestStats.transactionsOverflowed, "transaction") + recordMetricsOverflow(harvestStats.serviceTransactionsOverflowed, "service_transaction") + recordMetricsOverflow(harvestStats.spansOverflowed, "service_destination") + + // processingDelay is normalized by subtracting aggregation interval and + // harvest delay, both of which are expected delays. Normalization helps + // us to use the lower (higher resolution) range of the histogram for the + // important values. The normalized processingDelay can be negative as a + // result of premature harvest triggered by a stop of the aggregator. The + // negative value is accepted as a good value and recorded in the lower + // histogram buckets. + processingDelay := time.Since(cmk.ProcessingTime).Seconds() - + (ivl.Seconds() + a.cfg.HarvestDelay.Seconds()) + // queuedDelay is not explicitly normalized because we want to record the + // full delay. For a healthy deployment, the queued delay would be + // implicitly normalized due to the usage of youngest event timestamp. + // Negative values are possible at edges due to delays in running the + // harvest loop or time sync issues between agents and server. 
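+		// For example, with a 1m interval and no harvest delay, a combined
+		// metric harvested 61s after its processing time records a
+		// processingDelay of ~1s, while queuedDelay covers the full time
+		// since the youngest event in the bucket was received.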
+ queuedDelay := time.Since(harvestStats.youngestEventTimestamp).Seconds() + outcomeAttrOpt := metric.WithAttributeSet(attribute.NewSet( + telemetry.WithSuccess()), + ) + a.metrics.MinQueuedDelay.Record(context.Background(), queuedDelay, commonAttrsOpt, outcomeAttrOpt) + a.metrics.ProcessingLatency.Record(context.Background(), processingDelay, commonAttrsOpt, outcomeAttrOpt) + // Events harvested have been successfully processed, publish these + // as success. Update the map to keep track of events failed. + a.metrics.EventsProcessed.Add(context.Background(), harvestStats.eventsTotal, commonAttrsOpt, outcomeAttrOpt) + cachedEventsStats[cmk.ID] -= harvestStats.eventsTotal + } + err = a.db.DeleteRange(lb, ub, a.writeOptions) + if len(errs) > 0 { + err = errors.Join(err, fmt.Errorf( + "failed to process %d out of %d metrics:\n%w", + len(errs), cmCount, errors.Join(errs...), + )) + } + + // All remaining events in the cached events map should be failed events. + // Record these events with a failure outcome. + for cmID, eventsTotal := range cachedEventsStats { + if eventsTotal == 0 { + continue + } + if eventsTotal < 0 { + fields := append([]zap.Field{ + zap.Duration("aggregation_interval_ns", ivl), + zap.Float64("remaining_events", eventsTotal), + }, otelKVsToZapFields(a.cfg.CombinedMetricsIDToKVs(cmID))...) + a.cfg.Logger.Warn( + "unexpectedly failed to harvest all collected events", + fields..., + ) + continue + } + attrSetOpt := metric.WithAttributeSet( + attribute.NewSet(append( + a.cfg.CombinedMetricsIDToKVs(cmID), + ivlAttr, + telemetry.WithFailure(), + )...), + ) + a.metrics.EventsProcessed.Add(context.Background(), eventsTotal, attrSetOpt) + } + return cmCount, err +} + +func (a *Aggregator) processHarvest( + ctx context.Context, + cmk CombinedMetricsKey, + cmb []byte, + aggIvl time.Duration, +) (harvestStats, error) { + cm := aggregationpb.CombinedMetrics{} + if err := cm.UnmarshalVT(cmb); err != nil { + return harvestStats{}, fmt.Errorf("failed to unmarshal metrics: %w", err) + } + + // Processor can mutate the CombinedMetrics, so we cannot rely on the + // CombinedMetrics after Processor is called. Take a snapshot of the + // fields we record if processing succeeds. + hs := harvestStats{ + eventsTotal: cm.EventsTotal, + youngestEventTimestamp: modelpb.ToTime(cm.YoungestEventTimestamp), + servicesOverflowed: hllSketchEstimate(cm.OverflowServicesEstimator), + } + overflowLogger := nopLogger + if a.cfg.OverflowLogging { + fields := append([]zap.Field{ + zap.Duration("aggregation_interval_ns", aggIvl), + }, otelKVsToZapFields(a.cfg.CombinedMetricsIDToKVs(cmk.ID))...) + overflowLogger = a.cfg.Logger.WithLazy(fields...) + } + hs.addOverflows(&cm, a.cfg.Limits, overflowLogger) + + if err := a.cfg.Processor(ctx, cmk, &cm, aggIvl); err != nil { + return harvestStats{}, fmt.Errorf("failed to process combined metrics ID %s: %w", cmk.ID, err) + } + return hs, nil +} + +var ( + nopLogger = zap.NewNop() + + // TODO(carsonip): Update this log message when global labels implementation changes + serviceGroupLimitReachedMessage = fmt.Sprintf(""+ + "Service limit reached, new metric documents will be grouped under a dedicated "+ + "overflow bucket identified by service name '%s'. "+ + "If you are sending global labels that are request-specific (e.g. 
client IP), it may cause "+
+		"high cardinality and lead to exhaustion of services.",
+		overflowBucketName,
+	)
+
+	transactionGroupLimitReachedMessage = "" +
+		"Transaction group per service limit reached, " + transactionGroupLimitReachedSuffix
+	overallTransactionGroupLimitReachedMessage = "" +
+		"Overall transaction group limit reached, " + transactionGroupLimitReachedSuffix
+	transactionGroupLimitReachedSuffix = fmt.Sprintf(""+
+		"new metric documents will be grouped under a dedicated bucket identified by transaction name '%s'. "+
+		"This is typically caused by ineffective transaction grouping, "+
+		"e.g. by creating many unique transaction names. "+
+		"If you are using an agent with 'use_path_as_transaction_name' enabled, it may cause "+
+		"high cardinality. If your agent supports the 'transaction_name_groups' option, setting "+
+		"that configuration option appropriately may lead to better results.",
+		overflowBucketName,
+	)
+
+	serviceTransactionGroupLimitReachedMessage = fmt.Sprintf(""+
+		"Service transaction group per service limit reached, new metric documents will be grouped "+
+		"under a dedicated bucket identified by transaction type '%s'.",
+		overflowBucketName,
+	)
+	overallServiceTransactionGroupLimitReachedMessage = fmt.Sprintf(""+
+		"Overall service transaction group limit reached, new metric documents will be grouped "+
+		"under a dedicated bucket identified by transaction type '%s'.",
+		overflowBucketName,
+	)
+
+	spanGroupLimitReachedMessage = fmt.Sprintf(""+
+		"Span group per service limit reached, new metric documents will be grouped "+
+		"under a dedicated bucket identified by service target name '%s'.",
+		overflowBucketName,
+	)
+	overallSpanGroupLimitReachedMessage = fmt.Sprintf(""+
+		"Overall span group limit reached, new metric documents will be grouped "+
+		"under a dedicated bucket identified by service target name '%s'.",
+		overflowBucketName,
+	)
+)
+
+type harvestStats struct {
+	eventsTotal            float64
+	youngestEventTimestamp time.Time
+
+	servicesOverflowed            uint64
+	transactionsOverflowed        uint64
+	serviceTransactionsOverflowed uint64
+	spansOverflowed               uint64
+}
+
+func (hs *harvestStats) addOverflows(cm *aggregationpb.CombinedMetrics, limits Limits, logger *zap.Logger) {
+	if hs.servicesOverflowed != 0 {
+		logger.Warn(serviceGroupLimitReachedMessage, zap.Int("limit", limits.MaxServices))
+	}
+
+	// Flags indicating whether the overall limit has been reached, tracked
+	// per aggregation type so that each is logged only once.
+	var loggedOverallTransactionGroupLimitReached bool
+	var loggedOverallServiceTransactionGroupLimitReached bool
+	var loggedOverallSpanGroupLimitReached bool
+	logLimitReached := func(
+		n, limit int,
+		serviceKey *aggregationpb.ServiceAggregationKey,
+		perServiceMessage string,
+		overallMessage string,
+		loggedOverallMessage *bool,
+	) {
+		if serviceKey == nil {
+			// serviceKey will be nil for the service overflow bucket, which
+			// is caused by high-cardinality service keys rather than metric
+			// keys, so no per-service limit message applies.
+ return + } + if n >= limit { + logger.Warn( + perServiceMessage, + zap.String("service_name", serviceKey.GetServiceName()), + zap.Int("limit", limit), + ) + return + } else if !*loggedOverallMessage { + logger.Warn(overallMessage, zap.Int("limit", limit)) + *loggedOverallMessage = true + } + } + + addOverflow := func(o *aggregationpb.Overflow, ksm *aggregationpb.KeyedServiceMetrics) { + if o == nil { + return + } + if overflowed := hllSketchEstimate(o.OverflowTransactionsEstimator); overflowed > 0 { + hs.transactionsOverflowed += overflowed + logLimitReached( + len(ksm.GetMetrics().GetTransactionMetrics()), + limits.MaxTransactionGroupsPerService, + ksm.GetKey(), + transactionGroupLimitReachedMessage, + overallTransactionGroupLimitReachedMessage, + &loggedOverallTransactionGroupLimitReached, + ) + } + if overflowed := hllSketchEstimate(o.OverflowServiceTransactionsEstimator); overflowed > 0 { + hs.serviceTransactionsOverflowed += overflowed + logLimitReached( + len(ksm.GetMetrics().GetServiceTransactionMetrics()), + limits.MaxServiceTransactionGroupsPerService, + ksm.GetKey(), + serviceTransactionGroupLimitReachedMessage, + overallServiceTransactionGroupLimitReachedMessage, + &loggedOverallServiceTransactionGroupLimitReached, + ) + } + if overflowed := hllSketchEstimate(o.OverflowSpansEstimator); overflowed > 0 { + hs.spansOverflowed += overflowed + logLimitReached( + len(ksm.GetMetrics().GetSpanMetrics()), + limits.MaxSpanGroupsPerService, + ksm.GetKey(), + spanGroupLimitReachedMessage, + overallSpanGroupLimitReachedMessage, + &loggedOverallSpanGroupLimitReached, + ) + } + } + + addOverflow(cm.OverflowServices, nil) + for _, ksm := range cm.ServiceMetrics { + addOverflow(ksm.GetMetrics().GetOverflowGroups(), ksm) + } +} diff --git a/copy/apm-aggregation/aggregators/aggregator_test.go b/copy/apm-aggregation/aggregators/aggregator_test.go new file mode 100644 index 00000000000..686bbb49632 --- /dev/null +++ b/copy/apm-aggregation/aggregators/aggregator_test.go @@ -0,0 +1,1813 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package aggregators + +import ( + "context" + "fmt" + "math/rand" + "net/netip" + "sort" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/cockroachdb/pebble" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.elastic.co/apm/module/apmotel/v2" + "go.elastic.co/apm/v2" + "go.elastic.co/apm/v2/apmtest" + apmmodel "go.elastic.co/apm/v2/model" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" + "golang.org/x/sync/errgroup" + "google.golang.org/protobuf/testing/protocmp" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" + "github.com/elastic/apm-data/model/modelpb" +) + +func TestNew(t *testing.T) { + agg, err := New() + assert.NoError(t, err) + assert.NotNil(t, agg) +} + +func TestAggregateBatch(t *testing.T) { + exp := tracetest.NewInMemoryExporter() + tp := sdktrace.NewTracerProvider( + sdktrace.WithSyncer(exp), + ) + gatherer, err := apmotel.NewGatherer() + require.NoError(t, err) + mp := metric.NewMeterProvider(metric.WithReader(gatherer)) + + cmID := EncodeToCombinedMetricsKeyID(t, "ab01") + eventDuration := 100 * time.Millisecond + dssDuration := 10 * time.Millisecond + uniqueEventCount := 100 // for each of txns and spans + uniqueServices := 10 + repCount := 5 + ts := time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC) + batch := make(modelpb.Batch, 0, uniqueEventCount*repCount*2) + // Distribute the total unique transaction count amongst the total + // unique services uniformly. 
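+	// With uniqueEventCount=100 and uniqueServices=10, each service receives
+	// 10 distinct transaction and span names, each repeated repCount times.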
+ for i := 0; i < uniqueEventCount*repCount; i++ { + batch = append(batch, &modelpb.APMEvent{ + Event: &modelpb.Event{ + Outcome: "success", + Duration: uint64(eventDuration), + Received: modelpb.FromTime(ts), + }, + Transaction: &modelpb.Transaction{ + Name: fmt.Sprintf("foo%d", i%uniqueEventCount), + Type: fmt.Sprintf("txtype%d", i%uniqueEventCount), + RepresentativeCount: 1, + DroppedSpansStats: []*modelpb.DroppedSpanStats{ + { + DestinationServiceResource: fmt.Sprintf("dropped_dest_resource%d", i%uniqueEventCount), + Outcome: "success", + Duration: &modelpb.AggregatedDuration{ + Count: 1, + Sum: uint64(dssDuration), + }, + }, + }, + }, + Service: &modelpb.Service{Name: fmt.Sprintf("svc%d", i%uniqueServices)}, + }) + batch = append(batch, &modelpb.APMEvent{ + Event: &modelpb.Event{ + Duration: uint64(eventDuration), + Received: modelpb.FromTime(ts), + }, + Span: &modelpb.Span{ + Name: fmt.Sprintf("bar%d", i%uniqueEventCount), + Type: "type", + RepresentativeCount: 1, + DestinationService: &modelpb.DestinationService{ + Resource: "test_dest", + }, + }, + Service: &modelpb.Service{Name: fmt.Sprintf("svc%d", i%uniqueServices)}, + }) + } + + out := make(chan *aggregationpb.CombinedMetrics, 1) + aggIvl := time.Minute + agg, err := New( + WithDataDir(t.TempDir()), + WithLimits(Limits{ + MaxSpanGroups: 1000, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 100, + MaxTransactionGroupsPerService: 10, + MaxServiceTransactionGroups: 100, + MaxServiceTransactionGroupsPerService: 10, + MaxServices: 10, + }), + WithProcessor(combinedMetricsProcessor(out)), + WithAggregationIntervals([]time.Duration{aggIvl}), + WithHarvestDelay(time.Hour), // disable auto harvest + WithTracer(tp.Tracer("test")), + WithMeter(mp.Meter("test")), + WithCombinedMetricsIDToKVs(func(id [16]byte) []attribute.KeyValue { + return []attribute.KeyValue{attribute.String("id_key", string(id[:]))} + }), + ) + require.NoError(t, err) + + require.NoError(t, agg.AggregateBatch(context.Background(), cmID, &batch)) + require.NoError(t, agg.Close(context.Background())) + var cm *aggregationpb.CombinedMetrics + select { + case cm = <-out: + default: + t.Error("failed to get aggregated metrics") + t.FailNow() + } + + var span tracetest.SpanStub + for _, s := range exp.GetSpans() { + if s.Name == "AggregateBatch" { + span = s + } + } + assert.NotNil(t, span) + + expectedCombinedMetrics := NewTestCombinedMetrics( + WithEventsTotal(float64(len(batch))), + WithYoungestEventTimestamp(ts), + ) + expectedMeasurements := []apmmodel.Metrics{ + { + Samples: map[string]apmmodel.Metric{ + "events.processed.count": {Value: float64(len(batch))}, + "events.processed.latency": {Type: "histogram", Counts: []uint64{1}, Values: []float64{0}}, + "events.processed.queued-latency": {Type: "histogram", Counts: []uint64{1}, Values: []float64{0}}, + }, + Labels: apmmodel.StringMap{ + apmmodel.StringMapItem{Key: aggregationIvlKey, Value: formatDuration(aggIvl)}, + apmmodel.StringMapItem{Key: "id_key", Value: string(cmID[:])}, + apmmodel.StringMapItem{Key: "outcome", Value: string("success")}, + }, + }, + { + Samples: map[string]apmmodel.Metric{ + "events.processed.bytes": {Value: 131250}, + }, + Labels: apmmodel.StringMap{ + apmmodel.StringMapItem{Key: "id_key", Value: string(cmID[:])}, + apmmodel.StringMapItem{Key: "outcome", Value: string("success")}, + }, + }, + } + for i := 0; i < uniqueEventCount*repCount; i++ { + svcKey := serviceAggregationKey{ + Timestamp: time.Unix(0, 0).UTC(), + ServiceName: fmt.Sprintf("svc%d", i%uniqueServices), + } + txKey := 
transactionAggregationKey{ + TraceRoot: true, + TransactionName: fmt.Sprintf("foo%d", i%uniqueEventCount), + TransactionType: fmt.Sprintf("txtype%d", i%uniqueEventCount), + EventOutcome: "success", + } + stxKey := serviceTransactionAggregationKey{ + TransactionType: fmt.Sprintf("txtype%d", i%uniqueEventCount), + } + spanKey := spanAggregationKey{ + SpanName: fmt.Sprintf("bar%d", i%uniqueEventCount), + Resource: "test_dest", + } + dssKey := spanAggregationKey{ + SpanName: "", + Resource: fmt.Sprintf("dropped_dest_resource%d", i%uniqueEventCount), + Outcome: "success", + } + expectedCombinedMetrics. + AddServiceMetrics(svcKey). + AddTransaction(txKey, WithTransactionDuration(eventDuration)). + AddServiceTransaction(stxKey, WithTransactionDuration(eventDuration)). + AddSpan(spanKey, WithSpanDuration(eventDuration)). + AddSpan(dssKey, WithSpanDuration(dssDuration)) + } + assert.Empty(t, cmp.Diff( + expectedCombinedMetrics.GetProto(), cm, + append(combinedMetricsSliceSorters, + cmpopts.EquateEmpty(), + cmpopts.EquateApprox(0, 0.01), + cmp.Comparer(func(a, b hdrhistogram.HybridCountsRep) bool { + return a.Equal(&b) + }), + protocmp.Transform(), + )..., + )) + assert.Empty(t, cmp.Diff( + expectedMeasurements, + gatherMetrics( + gatherer, + withIgnoreMetricPrefix("pebble."), + withZeroHistogramValues(true), + ), + cmpopts.IgnoreUnexported(apmmodel.Time{}), + cmpopts.EquateApprox(0, 0.01), + )) +} + +func TestAggregateSpanMetrics(t *testing.T) { + type input struct { + serviceName string + agentName string + destination string + targetType string + targetName string + outcome string + representativeCount float64 + } + + destinationX := "destination-X" + destinationZ := "destination-Z" + trgTypeX := "trg-type-X" + trgNameX := "trg-name-X" + trgTypeZ := "trg-type-Z" + trgNameZ := "trg-name-Z" + defaultLabels := modelpb.Labels{ + "department_name": &modelpb.LabelValue{Global: true, Value: "apm"}, + "organization": &modelpb.LabelValue{Global: true, Value: "observability"}, + "company": &modelpb.LabelValue{Global: true, Value: "elastic"}, + } + defaultNumericLabels := modelpb.NumericLabels{ + "user_id": &modelpb.NumericLabelValue{Global: true, Value: 100}, + "cost_center": &modelpb.NumericLabelValue{Global: true, Value: 10}, + } + + now := time.Now() + for _, tt := range []struct { + name string + inputs []input + getExpectedEvents func(time.Time, time.Duration, time.Duration, int) []*modelpb.APMEvent + }{ + { + name: "with destination and service targets", + inputs: []input{ + {serviceName: "service-A", agentName: "java", destination: destinationZ, targetType: trgTypeZ, targetName: trgNameZ, outcome: "success", representativeCount: 2}, + {serviceName: "service-A", agentName: "java", destination: destinationX, targetType: trgTypeX, targetName: trgNameX, outcome: "success", representativeCount: 1}, + {serviceName: "service-B", agentName: "python", destination: destinationZ, targetType: trgTypeZ, targetName: trgNameZ, outcome: "success", representativeCount: 1}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, targetType: trgTypeZ, targetName: trgNameZ, outcome: "success", representativeCount: 1}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, targetType: trgTypeZ, targetName: trgNameZ, outcome: "success", representativeCount: 0}, + {serviceName: "service-A", agentName: "java", destination: destinationZ, targetType: trgTypeZ, targetName: trgNameZ, outcome: "failure", representativeCount: 1}, + }, + getExpectedEvents: func(ts time.Time, duration, ivl 
time.Duration, count int) []*modelpb.APMEvent { + return []*modelpb.APMEvent{ + { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: formatDuration(ivl), + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "python"}, + Service: &modelpb.Service{ + Name: "service-B", + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: formatDuration(ivl), + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + Target: &modelpb.ServiceTarget{ + Type: trgTypeX, + Name: trgNameX, + }, + }, + Event: &modelpb.Event{ + Outcome: "success", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: formatDuration(ivl), + DocCount: uint64(count), + }, + Span: &modelpb.Span{ + Name: "service-A:" + destinationX, + DestinationService: &modelpb.DestinationService{ + Resource: destinationX, + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(count), + Sum: uint64(time.Duration(count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + Target: &modelpb.ServiceTarget{ + Type: trgTypeZ, + Name: trgNameZ, + }, + }, + Event: &modelpb.Event{ + Outcome: "failure", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: formatDuration(ivl), + DocCount: uint64(count), + }, + Span: &modelpb.Span{ + Name: "service-A:" + destinationZ, + DestinationService: &modelpb.DestinationService{ + Resource: destinationZ, + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(count), + Sum: uint64(time.Duration(count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + Target: &modelpb.ServiceTarget{ + Type: trgTypeZ, + Name: trgNameZ, + }, + }, + Event: &modelpb.Event{ + Outcome: "success", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: formatDuration(ivl), + DocCount: uint64(3 * count), + }, + Span: &modelpb.Span{ + Name: "service-A:" + destinationZ, + DestinationService: &modelpb.DestinationService{ + Resource: destinationZ, + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(3 * count), + Sum: uint64(time.Duration(3*count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "python"}, + Service: &modelpb.Service{ + Name: "service-B", + Target: &modelpb.ServiceTarget{ + Type: trgTypeZ, + Name: trgNameZ, + }, + }, + Event: &modelpb.Event{ + Outcome: "success", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: 
formatDuration(ivl), + DocCount: uint64(count), + }, + Span: &modelpb.Span{ + Name: "service-B:" + destinationZ, + DestinationService: &modelpb.DestinationService{ + Resource: destinationZ, + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(count), + Sum: uint64(time.Duration(count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, + } + }, + }, { + name: "with_no_destination_and_no_service_target", + inputs: []input{ + {serviceName: "service-A", agentName: "java", outcome: "success", representativeCount: 1}, + }, + getExpectedEvents: func(ts time.Time, duration, ivl time.Duration, _ int) []*modelpb.APMEvent { + return []*modelpb.APMEvent{ + { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: formatDuration(ivl), + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, + } + }, + }, { + name: "with no destination and a service target", + inputs: []input{ + {serviceName: "service-A", agentName: "java", targetType: trgTypeZ, targetName: trgNameZ, outcome: "success", representativeCount: 1}, + }, + getExpectedEvents: func(ts time.Time, duration, ivl time.Duration, count int) []*modelpb.APMEvent { + return []*modelpb.APMEvent{ + { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: formatDuration(ivl), + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + Target: &modelpb.ServiceTarget{ + Type: trgTypeZ, + Name: trgNameZ, + }, + }, + Event: &modelpb.Event{ + Outcome: "success", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: formatDuration(ivl), + DocCount: uint64(count), + }, + Span: &modelpb.Span{ + Name: "service-A:", + DestinationService: &modelpb.DestinationService{ + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(count), + Sum: uint64(time.Duration(count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, + } + }, + }, { + name: "with a destination and no service target", + inputs: []input{ + {serviceName: "service-A", agentName: "java", destination: destinationZ, outcome: "success", representativeCount: 1}, + }, + getExpectedEvents: func(ts time.Time, duration, ivl time.Duration, count int) []*modelpb.APMEvent { + return []*modelpb.APMEvent{ + { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: formatDuration(ivl), + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, { + Timestamp: modelpb.FromTime(ts.Truncate(ivl)), + Agent: &modelpb.Agent{Name: "java"}, + Service: &modelpb.Service{ + Name: "service-A", + }, + Event: &modelpb.Event{ + Outcome: "success", + Received: modelpb.FromTime(now), + }, + Metricset: &modelpb.Metricset{ + Name: 
"service_destination", + Interval: formatDuration(ivl), + DocCount: uint64(count), + }, + Span: &modelpb.Span{ + Name: "service-A:" + destinationZ, + DestinationService: &modelpb.DestinationService{ + Resource: destinationZ, + ResponseTime: &modelpb.AggregatedDuration{ + Count: uint64(count), + Sum: uint64(time.Duration(count) * duration), + }, + }, + }, + Labels: defaultLabels, + NumericLabels: defaultNumericLabels, + }, + } + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var actualEvents []*modelpb.APMEvent + aggregationIvls := []time.Duration{time.Minute, 10 * time.Minute, time.Hour} + agg, err := New( + WithLimits(Limits{ + MaxSpanGroups: 1000, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 100, + MaxTransactionGroupsPerService: 10, + MaxServiceTransactionGroups: 100, + MaxServiceTransactionGroupsPerService: 10, + MaxServices: 10, + }), + WithAggregationIntervals(aggregationIvls), + WithProcessor(sliceProcessor(&actualEvents)), + WithDataDir(t.TempDir()), + ) + require.NoError(t, err) + + count := 100 + duration := 100 * time.Millisecond + for _, in := range tt.inputs { + span := makeSpan( + now, + in.serviceName, + in.agentName, + in.destination, + in.targetType, + in.targetName, + in.outcome, + duration, + in.representativeCount, + defaultLabels, + defaultNumericLabels, + ) + for i := 0; i < count; i++ { + err := agg.AggregateBatch( + context.Background(), + EncodeToCombinedMetricsKeyID(t, "ab01"), + &modelpb.Batch{span}, + ) + require.NoError(t, err) + } + } + require.NoError(t, agg.Close(context.Background())) + var expectedEvents []*modelpb.APMEvent + for _, ivl := range aggregationIvls { + expectedEvents = append(expectedEvents, tt.getExpectedEvents(now, duration, ivl, count)...) + } + sortKey := func(e *modelpb.APMEvent) string { + var sb strings.Builder + sb.WriteString(e.GetService().GetName()) + sb.WriteString(e.GetAgent().GetName()) + sb.WriteString(e.GetMetricset().GetName()) + sb.WriteString(e.GetMetricset().GetInterval()) + destSvc := e.GetSpan().GetDestinationService() + if destSvc != nil { + sb.WriteString(destSvc.GetResource()) + } + target := e.GetService().GetTarget() + if target != nil { + sb.WriteString(target.GetName()) + sb.WriteString(target.GetType()) + } + sb.WriteString(e.GetEvent().GetOutcome()) + return sb.String() + } + sort.Slice(expectedEvents, func(i, j int) bool { + return sortKey(expectedEvents[i]) < sortKey(expectedEvents[j]) + }) + sort.Slice(actualEvents, func(i, j int) bool { + return sortKey(actualEvents[i]) < sortKey(actualEvents[j]) + }) + assert.Empty(t, cmp.Diff( + expectedEvents, actualEvents, + cmpopts.EquateEmpty(), + cmpopts.IgnoreTypes(netip.Addr{}), + protocmp.Transform(), + protocmp.IgnoreFields(&modelpb.Event{}, "received"), + )) + }) + } +} + +func TestAggregateCombinedMetrics(t *testing.T) { + aggIvl := time.Second + now := time.Now().Truncate(aggIvl) + cmkID := EncodeToCombinedMetricsKeyID(t, "ab01") + + for _, tc := range []struct { + name string + cfgOpts []Option + input []*TestCombinedMetrics + expected []*aggregationpb.CombinedMetrics + expectedOutcome string + eventsCount int + }{ + { + name: "base", + input: []*TestCombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithKey(CombinedMetricsKey{ + Interval: aggIvl, + ProcessingTime: now, + ID: cmkID, + }), + ).AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }).AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + 
}).AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "txntype", + }).GetTest(), + + NewTestCombinedMetrics( + WithEventsTotal(1), + WithKey(CombinedMetricsKey{ + Interval: aggIvl, + ProcessingTime: now, + ID: cmkID, + }), + ).AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }).AddSpan(spanAggregationKey{ + SpanName: "spantest", + TargetType: "db", + TargetName: "test", + }, WithSpanDuration(time.Second), WithSpanCount(100)).GetTest(), + }, + expected: []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics(WithEventsTotal(2)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }). + AddSpan(spanAggregationKey{ + SpanName: "spantest", + TargetType: "db", + TargetName: "test", + }, WithSpanDuration(time.Second), WithSpanCount(100)). + AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + }). + AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "txntype", + }).GetProto(), + }, + expectedOutcome: "success", + eventsCount: 2, + }, + { + name: "without_lookback", + input: []*TestCombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + // Key with very old processing time will be dropped if + // it is not within lookback period. + WithKey(CombinedMetricsKey{ + Interval: aggIvl, + ProcessingTime: now.Add(-time.Hour), + ID: cmkID, + }), + ).AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }).AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + }).AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "txntype", + }).GetTest(), + }, + expected: []*aggregationpb.CombinedMetrics{}, // metrics are silently dropped + expectedOutcome: "failure", + eventsCount: 1, + }, + { + name: "with_lookback", + cfgOpts: []Option{WithLookback(2 * time.Hour)}, + input: []*TestCombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + // Key with very old processing time will be dropped if + // it is not within lookback period. + WithKey(CombinedMetricsKey{ + Interval: aggIvl, + ProcessingTime: now.Add(-time.Hour), + ID: cmkID, + }), + ).AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }).AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + }).AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "txntype", + }).GetTest(), + }, + expected: []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics(WithEventsTotal(1)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: now, + ServiceName: "test-svc", + }). + AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + }). 
+				AddServiceTransaction(serviceTransactionAggregationKey{
+					TransactionType: "txntype",
+				}).GetProto(),
+			},
+			expectedOutcome: "success",
+			eventsCount:     1,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			var output []*aggregationpb.CombinedMetrics
+			gatherer, err := apmotel.NewGatherer()
+			require.NoError(t, err)
+			mp := metric.NewMeterProvider(metric.WithReader(gatherer))
+			agg, err := New(append(
+				tc.cfgOpts,
+				WithDataDir(t.TempDir()),
+				WithAggregationIntervals([]time.Duration{aggIvl}),
+				WithProcessor(combinedMetricsSliceProcessor(&output)),
+				WithLimits(Limits{
+					MaxServices:                           100,
+					MaxSpanGroups:                         100,
+					MaxSpanGroupsPerService:               100,
+					MaxTransactionGroups:                  100,
+					MaxTransactionGroupsPerService:        100,
+					MaxServiceTransactionGroups:           100,
+					MaxServiceTransactionGroupsPerService: 100,
+				}),
+				WithHarvestDelay(time.Hour),
+				WithMeter(mp.Meter("test")),
+			)...)
+			require.NoError(t, err)
+
+			for _, tcm := range tc.input {
+				err := agg.AggregateCombinedMetrics(context.Background(), tcm.GetKey(), tcm.GetProto())
+				require.NoError(t, err)
+			}
+			require.NoError(t, agg.Close(context.Background()))
+
+			assert.Empty(t, cmp.Diff(
+				tc.expected,
+				output,
+				append(combinedMetricsSliceSorters,
+					cmpopts.EquateEmpty(),
+					cmpopts.EquateApprox(0, 0.01),
+					cmp.Comparer(func(a, b hdrhistogram.HybridCountsRep) bool {
+						return a.Equal(&b)
+					}),
+					protocmp.Transform(),
+				)...,
+			))
+
+			expectedMeasurements := []apmmodel.Metrics{
+				{
+					Samples: map[string]apmmodel.Metric{
+						"events.processed.count": {Value: float64(tc.eventsCount)},
+					},
+					Labels: apmmodel.StringMap{
+						apmmodel.StringMapItem{Key: aggregationIvlKey, Value: formatDuration(aggIvl)},
+						apmmodel.StringMapItem{Key: "outcome", Value: tc.expectedOutcome},
+					},
+				},
+			}
+			assert.Empty(t, cmp.Diff(
+				expectedMeasurements,
+				gatherMetrics(
+					gatherer,
+					withFilterMetrics([]string{"events.processed.count"}),
+					withZeroHistogramValues(true),
+				),
+				cmpopts.IgnoreUnexported(apmmodel.Time{}),
+			))
+		})
+	}
+}
+
+func TestCombinedMetricsKeyOrdered(t *testing.T) {
+	// To allow for retrieving combined metrics by time range, the metrics
+	// should be ordered by processing time.
+	ts := time.Now().Add(-time.Hour)
+	ivl := time.Minute
+
+	cmID := EncodeToCombinedMetricsKeyID(t, "ab01")
+	before := CombinedMetricsKey{
+		ProcessingTime: ts.Truncate(time.Minute),
+		Interval:       ivl,
+		ID:             cmID,
+	}
+	beforeBytes := make([]byte, CombinedMetricsKeyEncodedSize)
+	afterBytes := make([]byte, CombinedMetricsKeyEncodedSize)
+
+	for i := 0; i < 10; i++ {
+		ts = ts.Add(time.Minute)
+		cmID = EncodeToCombinedMetricsKeyID(t, fmt.Sprintf("ab%02d", rand.Intn(100)))
+		after := CombinedMetricsKey{
+			ProcessingTime: ts.Truncate(time.Minute),
+			Interval:       ivl,
+			// The combined metrics ID shouldn't matter. Keep the length
+			// at 5 to ensure it is within the expected bounds of the
+			// sized buffer.
+			ID: cmID,
+		}
+		require.NoError(t, after.MarshalBinaryToSizedBuffer(afterBytes))
+		require.NoError(t, before.MarshalBinaryToSizedBuffer(beforeBytes))
+
+		// before should always come first
+		assert.Equal(t, -1, pebble.DefaultComparer.Compare(beforeBytes, afterBytes))
+
+		before = after
+	}
+}
+
+// Keys should be ordered such that all the partitions for a specific ID are
+// listed before any other combined metrics ID.
+func TestCombinedMetricsKeyOrderedByProjectID(t *testing.T) {
+	// To allow for retrieving combined metrics by time range, the metrics
+	// should be ordered by processing time.
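+	// Here every key shares the same interval and processing time, so the
+	// assertions below exercise the next components of the ordering: keys
+	// must sort by combined metrics ID first and then by partition ID.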
+	ts := time.Now().Add(-time.Hour)
+	ivl := time.Minute
+
+	keyTemplate := CombinedMetricsKey{
+		ProcessingTime: ts.Truncate(time.Minute),
+		Interval:       ivl,
+	}
+	cmCount := 1000
+	pidCount := 500
+	keys := make([]CombinedMetricsKey, 0, cmCount*pidCount)
+
+	for i := 0; i < cmCount; i++ {
+		cmID := EncodeToCombinedMetricsKeyID(t, fmt.Sprintf("ab%06d", i))
+		for k := 0; k < pidCount; k++ {
+			key := keyTemplate
+			key.PartitionID = uint16(k)
+			key.ID = cmID
+			keys = append(keys, key)
+		}
+	}
+
+	before := keys[0]
+	beforeBytes := make([]byte, CombinedMetricsKeyEncodedSize)
+	afterBytes := make([]byte, CombinedMetricsKeyEncodedSize)
+
+	for i := 1; i < len(keys); i++ {
+		ts = ts.Add(time.Minute)
+		after := keys[i]
+		require.NoError(t, after.MarshalBinaryToSizedBuffer(afterBytes))
+		require.NoError(t, before.MarshalBinaryToSizedBuffer(beforeBytes))
+
+		// before should always come first
+		if !assert.Equal(
+			t, -1,
+			pebble.DefaultComparer.Compare(beforeBytes, afterBytes),
+			fmt.Sprintf("(%s, %d) should come before (%s, %d)", before.ID, before.PartitionID, after.ID, after.PartitionID),
+		) {
+			assert.FailNow(t, "keys not in expected order")
+		}
+
+		before = after
+	}
+}
+
+func TestHarvest(t *testing.T) {
+	cmCount := 5
+	ivls := []time.Duration{time.Second, 2 * time.Second, 4 * time.Second}
+	m := make(map[time.Duration]map[[16]byte]bool)
+	processorDone := make(chan struct{})
+	processor := func(
+		_ context.Context,
+		cmk CombinedMetricsKey,
+		_ *aggregationpb.CombinedMetrics,
+		ivl time.Duration,
+	) error {
+		cmMap, ok := m[ivl]
+		if !ok {
+			m[ivl] = make(map[[16]byte]bool)
+			cmMap = m[ivl]
+		}
+		// For each unique interval, we should only have a single combined metrics ID
+		if _, ok := cmMap[cmk.ID]; ok {
+			assert.FailNow(t, "duplicate combined metrics ID found")
+		}
+		cmMap[cmk.ID] = true
+		// For a successful harvest, all combined metrics IDs for each interval
+		// should be harvested
+		if len(m) == len(ivls) {
+			var remaining bool
+			for k := range m {
+				if len(m[k]) != cmCount {
+					remaining = true
+				}
+			}
+			if !remaining {
+				close(processorDone)
+			}
+		}
+		return nil
+	}
+	gatherer, err := apmotel.NewGatherer()
+	require.NoError(t, err)
+
+	agg, err := New(
+		WithDataDir(t.TempDir()),
+		WithLimits(Limits{
+			MaxSpanGroups:                         1000,
+			MaxTransactionGroups:                  100,
+			MaxTransactionGroupsPerService:        10,
+			MaxServiceTransactionGroups:           100,
+			MaxServiceTransactionGroupsPerService: 10,
+			MaxServices:                           10,
+		}),
+		WithProcessor(processor),
+		WithAggregationIntervals(ivls),
+		WithMeter(metric.NewMeterProvider(metric.WithReader(gatherer)).Meter("test")),
+		WithCombinedMetricsIDToKVs(func(id [16]byte) []attribute.KeyValue {
+			return []attribute.KeyValue{attribute.String("id_key", string(id[:]))}
+		}),
+	)
+	require.NoError(t, err)
+	go func() {
+		agg.Run(context.Background())
+	}()
+	t.Cleanup(func() {
+		agg.Close(context.Background())
+	})
+
+	var batch modelpb.Batch
+	batch = append(batch, &modelpb.APMEvent{
+		Transaction: &modelpb.Transaction{
+			Name:                "txn",
+			Type:                "type",
+			RepresentativeCount: 1,
+		},
+	})
+	expectedMeasurements := make([]apmmodel.Metrics, 0, cmCount+(cmCount*len(ivls)))
+	for i := 0; i < cmCount; i++ {
+		cmID := EncodeToCombinedMetricsKeyID(t, fmt.Sprintf("ab%2d", i))
+		require.NoError(t, agg.AggregateBatch(context.Background(), cmID, &batch))
+		expectedMeasurements = append(expectedMeasurements, apmmodel.Metrics{
+			Samples: map[string]apmmodel.Metric{
+				"events.processed.bytes": {Value: 252},
+			},
+			Labels: apmmodel.StringMap{
+				apmmodel.StringMapItem{Key: "id_key", Value: string(cmID[:])},
+				apmmodel.StringMapItem{Key: "outcome", Value: string("success")},
+			},
+		})
+		for _, ivl := range ivls {
+			expectedMeasurements = append(expectedMeasurements, apmmodel.Metrics{
+				Samples: map[string]apmmodel.Metric{
+					"events.processed.count":          {Value: float64(len(batch))},
+					"events.processed.latency":        {Type: "histogram", Counts: []uint64{1}, Values: []float64{0}},
+					"events.processed.queued-latency": {Type: "histogram", Counts: []uint64{1}, Values: []float64{0}},
+				},
+				Labels: apmmodel.StringMap{
+					apmmodel.StringMapItem{Key: aggregationIvlKey, Value: ivl.String()},
+					apmmodel.StringMapItem{Key: "id_key", Value: string(cmID[:])},
+					apmmodel.StringMapItem{Key: "outcome", Value: string("success")},
+				},
+			})
+		}
+	}
+
+	// The test is designed to time out if it fails. The test asserts most of
+	// the logic in the processor. If all expected metrics are harvested then
+	// the processor broadcasts this by closing the processorDone channel and
+	// we call it a success. If the harvest hasn't finished then the test
+	// times out and we call it a failure. Due to the nature of how the
+	// aggregator works, it is possible that this test becomes flaky if there
+	// is a bug.
+	select {
+	case <-processorDone:
+	case <-time.After(8 * time.Second):
+		t.Fatal("harvest didn't finish within expected time")
+	}
+	assert.Empty(t, cmp.Diff(
+		expectedMeasurements,
+		gatherMetrics(
+			gatherer,
+			withIgnoreMetricPrefix("pebble."),
+			withZeroHistogramValues(true),
+		),
+		cmpopts.IgnoreUnexported(apmmodel.Time{}),
+		cmpopts.SortSlices(func(a, b apmmodel.Metrics) bool {
+			if len(a.Labels) != len(b.Labels) {
+				return len(a.Labels) < len(b.Labels)
+			}
+			for i := 0; i < len(a.Labels); i++ {
+				// assuming keys are ordered
+				if a.Labels[i].Value != b.Labels[i].Value {
+					return a.Labels[i].Value < b.Labels[i].Value
+				}
+			}
+			return false
+		}),
+	))
+}
+
+func TestAggregateAndHarvest(t *testing.T) {
+	txnDuration := 100 * time.Millisecond
+	batch := modelpb.Batch{
+		{
+			Event: &modelpb.Event{
+				Outcome:  "success",
+				Duration: uint64(txnDuration),
+			},
+			Transaction: &modelpb.Transaction{
+				Name:                "foo",
+				Type:                "txtype",
+				RepresentativeCount: 1,
+			},
+			Service: &modelpb.Service{Name: "svc"},
+			Labels: modelpb.Labels{
+				"department_name": &modelpb.LabelValue{Global: true, Value: "apm"},
+				"organization":    &modelpb.LabelValue{Global: true, Value: "observability"},
+				"company":         &modelpb.LabelValue{Global: true, Value: "elastic"},
+				"mylabel":         &modelpb.LabelValue{Global: false, Value: "myvalue"},
+			},
+			NumericLabels: modelpb.NumericLabels{
+				"user_id":        &modelpb.NumericLabelValue{Global: true, Value: 100},
+				"cost_center":    &modelpb.NumericLabelValue{Global: true, Value: 10},
+				"mynumericlabel": &modelpb.NumericLabelValue{Global: false, Value: 1},
+			},
+		},
+	}
+	var events []*modelpb.APMEvent
+	agg, err := New(
+		WithDataDir(t.TempDir()),
+		WithLimits(Limits{
+			MaxSpanGroups:                         1000,
+			MaxSpanGroupsPerService:               100,
+			MaxTransactionGroups:                  100,
+			MaxTransactionGroupsPerService:        10,
+			MaxServiceTransactionGroups:           100,
+			MaxServiceTransactionGroupsPerService: 10,
+			MaxServices:                           10,
+		}),
+		WithProcessor(sliceProcessor(&events)),
+		WithAggregationIntervals([]time.Duration{time.Second}),
+	)
+	require.NoError(t, err)
+	require.NoError(t, agg.AggregateBatch(
+		context.Background(),
+		EncodeToCombinedMetricsKeyID(t, "ab01"),
+		&batch,
+	))
+	require.NoError(t, agg.Close(context.Background()))
+
+	expected := []*modelpb.APMEvent{
+		{
+			Timestamp: modelpb.FromTime(time.Unix(0, 0).UTC()),
+			Event: &modelpb.Event{
SuccessCount: &modelpb.SummaryMetric{ + Count: 1, + Sum: 1, + }, + Outcome: "success", + }, + Transaction: &modelpb.Transaction{ + Name: "foo", + Type: "txtype", + Root: true, + DurationSummary: &modelpb.SummaryMetric{ + Count: 1, + Sum: 100351, // Estimate from histogram + }, + DurationHistogram: &modelpb.Histogram{ + Values: []float64{100351}, + Counts: []uint64{1}, + }, + }, + Service: &modelpb.Service{ + Name: "svc", + }, + Labels: modelpb.Labels{ + "department_name": &modelpb.LabelValue{Global: true, Value: "apm"}, + "organization": &modelpb.LabelValue{Global: true, Value: "observability"}, + "company": &modelpb.LabelValue{Global: true, Value: "elastic"}, + }, + NumericLabels: modelpb.NumericLabels{ + "user_id": &modelpb.NumericLabelValue{Global: true, Value: 100}, + "cost_center": &modelpb.NumericLabelValue{Global: true, Value: 10}, + }, + Metricset: &modelpb.Metricset{ + Name: "transaction", + DocCount: 1, + Interval: "1s", + }, + }, + { + Timestamp: modelpb.FromTime(time.Unix(0, 0).UTC()), + Event: &modelpb.Event{}, + Service: &modelpb.Service{ + Name: "svc", + }, + Labels: modelpb.Labels{ + "department_name": &modelpb.LabelValue{Global: true, Value: "apm"}, + "organization": &modelpb.LabelValue{Global: true, Value: "observability"}, + "company": &modelpb.LabelValue{Global: true, Value: "elastic"}, + }, + NumericLabels: modelpb.NumericLabels{ + "user_id": &modelpb.NumericLabelValue{Global: true, Value: 100}, + "cost_center": &modelpb.NumericLabelValue{Global: true, Value: 10}, + }, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Interval: "1s", + }, + }, + { + Timestamp: modelpb.FromTime(time.Unix(0, 0).UTC()), + Event: &modelpb.Event{ + SuccessCount: &modelpb.SummaryMetric{ + Count: 1, + Sum: 1, + }, + }, + Transaction: &modelpb.Transaction{ + Type: "txtype", + DurationSummary: &modelpb.SummaryMetric{ + Count: 1, + Sum: 100351, // Estimate from histogram + }, + DurationHistogram: &modelpb.Histogram{ + Values: []float64{100351}, + Counts: []uint64{1}, + }, + }, + Service: &modelpb.Service{ + Name: "svc", + }, + Labels: modelpb.Labels{ + "department_name": &modelpb.LabelValue{Global: true, Value: "apm"}, + "organization": &modelpb.LabelValue{Global: true, Value: "observability"}, + "company": &modelpb.LabelValue{Global: true, Value: "elastic"}, + }, + NumericLabels: modelpb.NumericLabels{ + "user_id": &modelpb.NumericLabelValue{Global: true, Value: 100}, + "cost_center": &modelpb.NumericLabelValue{Global: true, Value: 10}, + }, + Metricset: &modelpb.Metricset{ + Name: "service_transaction", + DocCount: 1, + Interval: "1s", + }, + }, + } + assert.Empty(t, cmp.Diff( + expected, + events, + cmpopts.IgnoreTypes(netip.Addr{}), + cmpopts.SortSlices(func(a, b *modelpb.APMEvent) bool { + return a.Metricset.Name < b.Metricset.Name + }), + protocmp.Transform(), + protocmp.IgnoreFields(&modelpb.Event{}, "received"), + )) +} + +func TestHarvestOverflowCount(t *testing.T) { + for _, tc := range []struct { + limits Limits + expectedLogPerService bool + }{ + { + limits: Limits{ + MaxSpanGroups: 4, + MaxSpanGroupsPerService: 4, + MaxTransactionGroups: 3, + MaxTransactionGroupsPerService: 3, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 2, + MaxServices: 1, + }, + expectedLogPerService: true, + }, + { + limits: Limits{ + MaxSpanGroups: 4, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 3, + MaxTransactionGroupsPerService: 100, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 100, + MaxServices: 1, + }, + 
expectedLogPerService: false, + }, + } { + limits := tc.limits + ivls := []time.Duration{time.Minute} + reader := metric.NewManualReader() + meter := metric.NewMeterProvider(metric.WithReader(reader)).Meter("test") + + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + observedLogger := zap.New(observedZapCore) + agg := newTestAggregator(t, + WithLimits(limits), + WithAggregationIntervals(ivls), + WithMeter(meter), + WithCombinedMetricsIDToKVs(func(id [16]byte) []attribute.KeyValue { + return []attribute.KeyValue{attribute.String("id_key", "id_value")} + }), + WithLogger(observedLogger), + WithOverflowLogging(true), + ) + + var batch modelpb.Batch + for i := 0; i < limits.MaxServices+1; i++ { + serviceName := fmt.Sprintf("service_name_%d", i) + for i := 0; i < limits.MaxTransactionGroups+1; i++ { + transactionName := fmt.Sprintf("transaction_name_%d", i) + transactionType := fmt.Sprintf( + "transaction_type_%d", i%(limits.MaxServiceTransactionGroups+1), + ) + batch = append(batch, &modelpb.APMEvent{ + Service: &modelpb.Service{Name: serviceName}, + Transaction: &modelpb.Transaction{ + Name: transactionName, + Type: transactionType, + RepresentativeCount: 1, + }, + }) + } + for i := 0; i < limits.MaxSpanGroups+1; i++ { + serviceTargetName := fmt.Sprintf("service_target_name_%d", i) + batch = append(batch, &modelpb.APMEvent{ + Service: &modelpb.Service{ + Name: serviceName, + Target: &modelpb.ServiceTarget{ + Name: serviceTargetName, + Type: "service_target_type", + }, + }, + Span: &modelpb.Span{ + Name: "span_name", + Type: "span_type", + RepresentativeCount: 1, + }, + }) + } + } + cmID := EncodeToCombinedMetricsKeyID(t, "cm_id") + require.NoError(t, agg.AggregateBatch(context.Background(), cmID, &batch)) + + // Force harvest. + require.NoError(t, agg.Close(context.Background())) + + var resourceMetrics metricdata.ResourceMetrics + require.NoError(t, reader.Collect(context.Background(), &resourceMetrics)) + require.Len(t, resourceMetrics.ScopeMetrics, 1) + scopeMetrics := resourceMetrics.ScopeMetrics[0] + + expected := metricdata.Sum[int64]{ + IsMonotonic: true, + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{{ + Attributes: attribute.NewSet( + attribute.String(aggregationIvlKey, "1m"), + attribute.String(aggregationTypeKey, "service"), + attribute.String("id_key", "id_value"), + ), + Value: 1, + }, { + Attributes: attribute.NewSet( + attribute.String(aggregationIvlKey, "1m"), + attribute.String(aggregationTypeKey, "service_destination"), + attribute.String("id_key", "id_value"), + ), + Value: int64(limits.MaxSpanGroups) + 2, + }, { + Attributes: attribute.NewSet( + attribute.String(aggregationIvlKey, "1m"), + attribute.String(aggregationTypeKey, "service_transaction"), + attribute.String("id_key", "id_value"), + ), + Value: int64(limits.MaxServiceTransactionGroups) + 2, + }, { + Attributes: attribute.NewSet( + attribute.String(aggregationIvlKey, "1m"), + attribute.String(aggregationTypeKey, "transaction"), + attribute.String("id_key", "id_value"), + ), + Value: int64(limits.MaxTransactionGroups) + 2, + }}, + } + + var found bool + for _, metric := range scopeMetrics.Metrics { + if metric.Name != "metrics.overflowed.count" { + continue + } + metricdatatest.AssertAggregationsEqual( + t, expected, metric.Data, + metricdatatest.IgnoreTimestamp(), + ) + found = true + break + } + assert.True(t, found) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Service limit 
reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), 1) + + var expectedLogPerServiceCount, expectedGlobalLogCount int + if tc.expectedLogPerService { + expectedLogPerServiceCount = 1 + expectedGlobalLogCount = 0 + } else { + expectedLogPerServiceCount = 0 + expectedGlobalLogCount = 1 + } + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Transaction group per service limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedLogPerServiceCount) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Service transaction group per service limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedLogPerServiceCount) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Span group per service limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedLogPerServiceCount) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Overall transaction group limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedGlobalLogCount) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Overall service transaction group limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedGlobalLogCount) + + assert.Len(t, observedLogs.Filter(func(entry observer.LoggedEntry) bool { + return strings.Contains(entry.Message, "Overall span group limit reached") && + entry.ContextMap()["id_key"] == "id_value" + }).All(), expectedGlobalLogCount) + } +} + +func TestRunStopOrchestration(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var firstHarvestDone atomic.Bool + newAggregator := func() *Aggregator { + agg, err := New( + WithDataDir(t.TempDir()), + WithProcessor(func(_ context.Context, _ CombinedMetricsKey, _ *aggregationpb.CombinedMetrics, _ time.Duration) error { + firstHarvestDone.Swap(true) + return nil + }), + WithAggregationIntervals([]time.Duration{time.Second}), + ) + if err != nil { + t.Fatal("failed to create test aggregator", err) + } + return agg + } + callAggregateBatch := func(agg *Aggregator) error { + return agg.AggregateBatch( + context.Background(), + EncodeToCombinedMetricsKeyID(t, "ab01"), + &modelpb.Batch{ + &modelpb.APMEvent{ + Event: &modelpb.Event{Duration: uint64(time.Millisecond)}, + Transaction: &modelpb.Transaction{ + Name: "T-1000", + Type: "type", + RepresentativeCount: 1, + }, + }, + }, + ) + } + + t.Run("run_before_close", func(t *testing.T) { + agg := newAggregator() + // Should aggregate even without running + assert.NoError(t, callAggregateBatch(agg)) + go func() { agg.Run(ctx) }() + assert.Eventually(t, func() bool { + return firstHarvestDone.Load() + }, 10*time.Second, 10*time.Millisecond, "failed while waiting for first harvest") + assert.NoError(t, callAggregateBatch(agg)) + assert.NoError(t, agg.Close(ctx)) + assert.ErrorIs(t, callAggregateBatch(agg), ErrAggregatorClosed) + }) + t.Run("close_before_run", func(t *testing.T) { + agg := newAggregator() + assert.NoError(t, agg.Close(ctx)) + assert.ErrorIs(t, callAggregateBatch(agg), ErrAggregatorClosed) + assert.ErrorIs(t, agg.Run(ctx), ErrAggregatorClosed) + }) + t.Run("multiple_run", func(t *testing.T) { + agg := 
newAggregator() + defer agg.Close(ctx) + + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { return agg.Run(ctx) }) + g.Go(func() error { return agg.Run(ctx) }) + err := g.Wait() + assert.Error(t, err) + assert.EqualError(t, err, "aggregator is already running") + }) + t.Run("multiple_close", func(t *testing.T) { + agg := newAggregator() + defer agg.Close(ctx) + go func() { agg.Run(ctx) }() + time.Sleep(time.Second) + + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { return agg.Close(ctx) }) + g.Go(func() error { return agg.Close(ctx) }) + assert.NoError(t, g.Wait()) + }) +} + +func BenchmarkAggregateCombinedMetrics(b *testing.B) { + gatherer, err := apmotel.NewGatherer() + if err != nil { + b.Fatal(err) + } + mp := metric.NewMeterProvider(metric.WithReader(gatherer)) + aggIvl := time.Minute + agg, err := New( + WithDataDir(b.TempDir()), + WithLimits(Limits{ + MaxSpanGroups: 1000, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 1000, + MaxTransactionGroupsPerService: 100, + MaxServiceTransactionGroups: 1000, + MaxServiceTransactionGroupsPerService: 100, + MaxServices: 100, + }), + WithProcessor(noOpProcessor()), + WithMeter(mp.Meter("test")), + WithLogger(zap.NewNop()), + ) + if err != nil { + b.Fatal(err) + } + go func() { + agg.Run(context.Background()) + }() + b.Cleanup(func() { + agg.Close(context.Background()) + }) + cmk := CombinedMetricsKey{ + Interval: aggIvl, + ProcessingTime: time.Now().Truncate(aggIvl), + ID: EncodeToCombinedMetricsKeyID(b, "ab01"), + } + cm := NewTestCombinedMetrics(WithEventsTotal(1)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: time.Now(), + ServiceName: "test-svc", + }). + AddTransaction(transactionAggregationKey{ + TransactionName: "txntest", + TransactionType: "txntype", + }). + AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "txntype", + }). + GetProto() + b.Cleanup(func() { cm.ReturnToVTPool() }) + ctx, cancel := context.WithCancel(context.Background()) + b.Cleanup(func() { cancel() }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := agg.AggregateCombinedMetrics(ctx, cmk, cm); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkAggregateBatchSerial(b *testing.B) { + b.ReportAllocs() + agg := newTestAggregator(b) + defer agg.Close(context.Background()) + batch := newTestBatchForBenchmark() + cmID := EncodeToCombinedMetricsKeyID(b, "ab01") + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkAggregateBatchParallel(b *testing.B) { + b.ReportAllocs() + agg := newTestAggregator(b) + defer agg.Close(context.Background()) + batch := newTestBatchForBenchmark() + cmID := EncodeToCombinedMetricsKeyID(b, "ab01") + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { + b.Fatal(err) + } + } + }) +} + +func newTestAggregator(tb testing.TB, opts ...Option) *Aggregator { + agg, err := New(append([]Option{ + WithDataDir(tb.TempDir()), + WithLimits(Limits{ + MaxSpanGroups: 1000, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 1000, + MaxTransactionGroupsPerService: 100, + MaxServiceTransactionGroups: 1000, + MaxServiceTransactionGroupsPerService: 100, + MaxServices: 100, + }), + WithProcessor(noOpProcessor()), + WithAggregationIntervals([]time.Duration{time.Second, time.Minute, time.Hour}), + WithLogger(zap.NewNop()), + }, opts...)...) 
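+	// Caller-supplied opts are appended after the defaults above; assuming
+	// New applies options in order, later options take precedence.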
+	if err != nil {
+		tb.Fatal(err)
+	}
+	tb.Cleanup(func() {
+		if err := agg.Close(context.Background()); err != nil {
+			tb.Fatal(err)
+		}
+	})
+	return agg
+}
+
+func newTestBatchForBenchmark() *modelpb.Batch {
+	return &modelpb.Batch{
+		&modelpb.APMEvent{
+			Event: &modelpb.Event{Duration: uint64(time.Millisecond)},
+			Transaction: &modelpb.Transaction{
+				Name:                "T-1000",
+				Type:                "type",
+				RepresentativeCount: 1,
+			},
+		},
+	}
+}
+
+func noOpProcessor() Processor {
+	return func(_ context.Context, _ CombinedMetricsKey, _ *aggregationpb.CombinedMetrics, _ time.Duration) error {
+		return nil
+	}
+}
+
+func combinedMetricsProcessor(out chan<- *aggregationpb.CombinedMetrics) Processor {
+	return func(
+		_ context.Context,
+		_ CombinedMetricsKey,
+		cm *aggregationpb.CombinedMetrics,
+		_ time.Duration,
+	) error {
+		out <- cm.CloneVT()
+		return nil
+	}
+}
+
+func combinedMetricsSliceProcessor(slice *[]*aggregationpb.CombinedMetrics) Processor {
+	return func(
+		_ context.Context,
+		_ CombinedMetricsKey,
+		cm *aggregationpb.CombinedMetrics,
+		_ time.Duration,
+	) error {
+		*slice = append(*slice, cm.CloneVT())
+		return nil
+	}
+}
+
+func sliceProcessor(slice *[]*modelpb.APMEvent) Processor {
+	return func(
+		ctx context.Context,
+		cmk CombinedMetricsKey,
+		cm *aggregationpb.CombinedMetrics,
+		aggregationIvl time.Duration,
+	) error {
+		batch, err := CombinedMetricsToBatch(cm, cmk.ProcessingTime, aggregationIvl)
+		if err != nil {
+			return err
+		}
+		if batch != nil {
+			for _, e := range *batch {
+				*slice = append(*slice, e)
+			}
+		}
+		return nil
+	}
+}
+
+type gatherMetricsCfg struct {
+	ignoreMetricPrefix  string
+	filterMetrics       map[string]bool
+	zeroHistogramValues bool
+}
+
+type gatherMetricsOpt func(gatherMetricsCfg) gatherMetricsCfg
+
+// withFilterMetrics selects a set of metric names from the gathered metrics.
+// The filters are applied after the withIgnoreMetricPrefix option.
+func withFilterMetrics(metrics []string) gatherMetricsOpt {
+	return func(cfg gatherMetricsCfg) gatherMetricsCfg {
+		cfg.filterMetrics = make(map[string]bool, len(metrics))
+		for _, m := range metrics {
+			cfg.filterMetrics[m] = true
+		}
+		return cfg
+	}
+}
+
+// withIgnoreMetricPrefix drops any gathered metric whose name starts with
+// the given prefix.
+func withIgnoreMetricPrefix(s string) gatherMetricsOpt {
+	return func(cfg gatherMetricsCfg) gatherMetricsCfg {
+		cfg.ignoreMetricPrefix = s
+		return cfg
+	}
+}
+
+// withZeroHistogramValues zeroes all histogram values if true. Useful
+// for testing where histogram values are harder to estimate correctly.
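+//
+// Typical usage together with gatherMetrics (mirroring the tests above):
+//
+//	metrics := gatherMetrics(gatherer,
+//		withFilterMetrics([]string{"events.processed.count"}),
+//		withZeroHistogramValues(true),
+//	)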
+func withZeroHistogramValues(b bool) gatherMetricsOpt {
+	return func(cfg gatherMetricsCfg) gatherMetricsCfg {
+		cfg.zeroHistogramValues = b
+		return cfg
+	}
+}
+
+func gatherMetrics(g apm.MetricsGatherer, opts ...gatherMetricsOpt) []apmmodel.Metrics {
+	var cfg gatherMetricsCfg
+	for _, opt := range opts {
+		cfg = opt(cfg)
+	}
+	tracer := apmtest.NewRecordingTracer()
+	defer tracer.Close()
+	tracer.RegisterMetricsGatherer(g)
+	tracer.SendMetrics(nil)
+	metrics := tracer.Payloads().Metrics
+	for i := range metrics {
+		metrics[i].Timestamp = apmmodel.Time{}
+	}
+
+	for _, m := range metrics {
+		for k, s := range m.Samples {
+			// Remove internal metrics
+			if strings.HasPrefix(k, "golang.") || strings.HasPrefix(k, "system.") {
+				delete(m.Samples, k)
+				continue
+			}
+			// Remove any metrics that have been explicitly ignored
+			if cfg.ignoreMetricPrefix != "" && strings.HasPrefix(k, cfg.ignoreMetricPrefix) {
+				delete(m.Samples, k)
+				continue
+			}
+			// If the filter metrics option is passed then drop all unfiltered metrics
+			if len(cfg.filterMetrics) > 0 && !cfg.filterMetrics[k] {
+				delete(m.Samples, k)
+				continue
+			}
+			// Zero out histogram values if required
+			if s.Type == "histogram" && cfg.zeroHistogramValues {
+				for j := range s.Values {
+					s.Values[j] = 0
+				}
+			}
+		}
+	}
+	// Filter out any metrics with 0 samples
+	var filledTill int
+	for i, m := range metrics {
+		if len(m.Samples) == 0 {
+			continue
+		}
+		if filledTill != i {
+			metrics[filledTill] = metrics[i]
+		}
+		filledTill++
+	}
+	return metrics[:filledTill]
+}
+
+func makeSpan(
+	ts time.Time,
+	serviceName, agentName, destinationServiceResource, targetType, targetName, outcome string,
+	duration time.Duration,
+	representativeCount float64,
+	labels modelpb.Labels,
+	numericLabels modelpb.NumericLabels,
+) *modelpb.APMEvent {
+	event := &modelpb.APMEvent{
+		Timestamp: modelpb.FromTime(ts),
+		Agent:     &modelpb.Agent{Name: agentName},
+		Service:   &modelpb.Service{Name: serviceName},
+		Event: &modelpb.Event{
+			Received: modelpb.FromTime(time.Now()),
+			Outcome:  outcome,
+			Duration: uint64(duration),
+		},
+		Span: &modelpb.Span{
+			Name:                serviceName + ":" + destinationServiceResource,
+			Type:                "type",
+			RepresentativeCount: representativeCount,
+		},
+		Labels:        labels,
+		NumericLabels: numericLabels,
+	}
+	if destinationServiceResource != "" {
+		event.Span.DestinationService = &modelpb.DestinationService{
+			Resource: destinationServiceResource,
+		}
+	}
+	if targetType != "" {
+		event.Service.Target = &modelpb.ServiceTarget{
+			Type: targetType,
+			Name: targetName,
+		}
+	}
+	return event
+}
diff --git a/copy/apm-aggregation/aggregators/cachedeventsmap.go b/copy/apm-aggregation/aggregators/cachedeventsmap.go
new file mode 100644
index 00000000000..b80807daf13
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/cachedeventsmap.go
@@ -0,0 +1,77 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package aggregators
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// cachedEventsMap holds counts of cached events, keyed by interval and ID.
+// Cached events are events that have been processed by Aggregate methods,
+// but which haven't yet been harvested. Event counts are fractional because
+// an event may be spread over multiple partitions, so they are stored in
+// fixed point, scaled by math.MaxUint16, which allows them to be updated
+// with atomic operations.
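+//
+// For example, an event spread evenly over two partitions contributes 0.5
+// per partition: each update adds uint64(0.5*math.MaxUint16) == 32767, and
+// the harvest recovers float64(2*32767)/math.MaxUint16 ≈ 1.0.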
+//
+// Concurrent aggregations perform atomic updates to the pooled counters in
+// the map, and the harvester removes each entry from the map as it reads it.
+type cachedEventsMap struct {
+	// (interval, id) -> count
+	m         sync.Map
+	countPool sync.Pool
+}
+
+func (m *cachedEventsMap) loadAndDelete(end time.Time) map[time.Duration]map[[16]byte]float64 {
+	loaded := make(map[time.Duration]map[[16]byte]float64)
+	m.m.Range(func(k, v any) bool {
+		key := k.(cachedEventsStatsKey)
+		if !end.Truncate(key.interval).Equal(end) {
+			return true
+		}
+		intervalMetrics, ok := loaded[key.interval]
+		if !ok {
+			intervalMetrics = make(map[[16]byte]float64)
+			loaded[key.interval] = intervalMetrics
+		}
+		vscaled := *v.(*uint64)
+		// Divide in floating point to preserve the fractional part of the
+		// fixed-point scaled count.
+		value := float64(vscaled) / math.MaxUint16
+		intervalMetrics[key.id] = value
+		m.m.Delete(k)
+		m.countPool.Put(v)
+		return true
+	})
+	return loaded
+}
+
+func (m *cachedEventsMap) add(interval time.Duration, id [16]byte, n float64) {
+	// We use a pool for the value to minimise allocations, as it will
+	// always escape to the heap through LoadOrStore.
+	nscaled, ok := m.countPool.Get().(*uint64)
+	if !ok {
+		nscaled = new(uint64)
+	}
+	// Scale by the maximum number of partitions to get an integer value,
+	// for simpler atomic operations.
+	*nscaled = uint64(n * math.MaxUint16)
+	key := cachedEventsStatsKey{interval: interval, id: id}
+	old, loaded := m.m.Load(key)
+	if !loaded {
+		old, loaded = m.m.LoadOrStore(key, nscaled)
+		if !loaded {
+			// Stored a new value.
+			return
+		}
+	}
+	atomic.AddUint64(old.(*uint64), *nscaled)
+	m.countPool.Put(nscaled)
+}
+
+type cachedEventsStatsKey struct {
+	interval time.Duration
+	id       [16]byte
+}
diff --git a/copy/apm-aggregation/aggregators/codec.go b/copy/apm-aggregation/aggregators/codec.go
new file mode 100644
index 00000000000..80ea4084ea1
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/codec.go
@@ -0,0 +1,480 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package aggregators
+
+// TODO(lahsivjar): Add a test using reflect to validate if all
+// fields are properly set.
+
+import (
+	"encoding/binary"
+	"errors"
+	"slices"
+	"sort"
+	"time"
+
+	"github.com/axiomhq/hyperloglog"
+
+	"github.com/elastic/apm-aggregation/aggregationpb"
+	"github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram"
+	"github.com/elastic/apm-aggregation/aggregators/nullable"
+	"github.com/elastic/apm-data/model/modelpb"
+)
+
+// CombinedMetricsKeyEncodedSize gives the encoded size of CombinedMetricsKey
+// in bytes. The size breaks down as follows:
+// - 2 bytes for interval encoding
+// - 8 bytes for timestamp encoding
+// - 16 bytes for ID encoding
+// - 2 bytes for partition ID
+const CombinedMetricsKeyEncodedSize = 28
+
+// MarshalBinaryToSizedBuffer will marshal the combined metrics key into
+// its binary representation. The encoded byte slice will be used as a
+// key in pebbledb. To ensure efficient sorting and time-range based
+// queries, the first 2 bytes of the encoded slice are the aggregation
+// interval and the next 8 bytes are the processing time, followed by the
+// 16-byte combined metrics ID; the last 2 bytes are the partition ID.
+// Within the same interval and processing time, the binary representation
+// thus orders entries by ID first and then by partition ID.
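+// Since every field is encoded big-endian, lexicographic byte comparison
+// (e.g. pebble.DefaultComparer.Compare) agrees with this logical ordering;
+// for example, a key with a 1m interval starts with the big-endian uint16
+// 60, i.e. bytes 0x00 0x3C.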
+func (k *CombinedMetricsKey) MarshalBinaryToSizedBuffer(data []byte) error {
+	ivlSeconds := uint16(k.Interval.Seconds())
+	if len(data) != CombinedMetricsKeyEncodedSize {
+		return errors.New("failed to marshal due to incorrect sized buffer")
+	}
+	var offset int
+
+	binary.BigEndian.PutUint16(data[offset:], ivlSeconds)
+	offset += 2
+
+	binary.BigEndian.PutUint64(data[offset:], uint64(k.ProcessingTime.Unix()))
+	offset += 8
+
+	copy(data[offset:], k.ID[:])
+	offset += 16
+
+	binary.BigEndian.PutUint16(data[offset:], k.PartitionID)
+	return nil
+}
+
+// UnmarshalBinary will convert the byte encoded data into CombinedMetricsKey.
+func (k *CombinedMetricsKey) UnmarshalBinary(data []byte) error {
+	// The encoding is fixed size; shorter input cannot contain all fields.
+	if len(data) < CombinedMetricsKeyEncodedSize {
+		return errors.New("invalid encoded data of insufficient length")
+	}
+	var offset int
+	k.Interval = time.Duration(binary.BigEndian.Uint16(data[offset:2])) * time.Second
+	offset += 2
+
+	k.ProcessingTime = time.Unix(int64(binary.BigEndian.Uint64(data[offset:offset+8])), 0)
+	offset += 8
+
+	copy(k.ID[:], data[offset:offset+len(k.ID)])
+	offset += len(k.ID)
+
+	k.PartitionID = binary.BigEndian.Uint16(data[offset:])
+	return nil
+}
+
+// SizeBinary returns the size of the byte array required to encode
+// combined metrics key. Encoded size for CombinedMetricsKey is constant
+// and alternatively the constant CombinedMetricsKeyEncodedSize can be used.
+func (k *CombinedMetricsKey) SizeBinary() int {
+	return CombinedMetricsKeyEncodedSize
+}
+
+// GetEncodedCombinedMetricsKeyWithoutPartitionID is a util function to
+// remove partition bits from an encoded CombinedMetricsKey.
+func GetEncodedCombinedMetricsKeyWithoutPartitionID(src []byte) []byte {
+	var buf [CombinedMetricsKeyEncodedSize]byte
+	copy(buf[:CombinedMetricsKeyEncodedSize-2], src)
+	return buf[:]
+}
+
+// ToProto converts CombinedMetrics to its protobuf representation.
+func (m *combinedMetrics) ToProto() *aggregationpb.CombinedMetrics {
+	pb := aggregationpb.CombinedMetrics{}
+	pb.ServiceMetrics = slices.Grow(pb.ServiceMetrics, len(m.Services))[:len(m.Services)]
+	var i int
+	for k, m := range m.Services {
+		if pb.ServiceMetrics[i] == nil {
+			pb.ServiceMetrics[i] = &aggregationpb.KeyedServiceMetrics{}
+		}
+		pb.ServiceMetrics[i].Key = k.ToProto()
+		pb.ServiceMetrics[i].Metrics = m.ToProto()
+		i++
+	}
+	if m.OverflowServicesEstimator != nil {
+		pb.OverflowServices = m.OverflowServices.ToProto()
+		pb.OverflowServicesEstimator = hllBytes(m.OverflowServicesEstimator)
+	}
+	pb.EventsTotal = m.EventsTotal
+	pb.YoungestEventTimestamp = m.YoungestEventTimestamp
+	return &pb
+}
+
+// ToProto converts ServiceAggregationKey to its protobuf representation.
+func (k *serviceAggregationKey) ToProto() *aggregationpb.ServiceAggregationKey {
+	pb := aggregationpb.ServiceAggregationKey{}
+	pb.Timestamp = modelpb.FromTime(k.Timestamp)
+	pb.ServiceName = k.ServiceName
+	pb.ServiceEnvironment = k.ServiceEnvironment
+	pb.ServiceLanguageName = k.ServiceLanguageName
+	pb.AgentName = k.AgentName
+	pb.GlobalLabelsStr = []byte(k.GlobalLabelsStr)
+	return &pb
+}
+
+// FromProto converts protobuf representation to ServiceAggregationKey.
+func (k *serviceAggregationKey) FromProto(pb *aggregationpb.ServiceAggregationKey) {
+	k.Timestamp = modelpb.ToTime(pb.Timestamp)
+	k.ServiceName = pb.ServiceName
+	k.ServiceEnvironment = pb.ServiceEnvironment
+	k.ServiceLanguageName = pb.ServiceLanguageName
+	k.AgentName = pb.AgentName
+	k.GlobalLabelsStr = string(pb.GlobalLabelsStr)
+}
+
+// ToProto converts ServiceMetrics to its protobuf representation.
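+// Iteration over the underlying Go maps makes the order of the repeated
+// metric fields nondeterministic; tests account for this by comparing with
+// the protocmp.SortRepeated options in combinedMetricsSliceSorters.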
+func (m *serviceMetrics) ToProto() *aggregationpb.ServiceMetrics { + pb := aggregationpb.ServiceMetrics{} + pb.OverflowGroups = m.OverflowGroups.ToProto() + + pb.TransactionMetrics = slices.Grow(pb.TransactionMetrics, len(m.TransactionGroups)) + for _, m := range m.TransactionGroups { + pb.TransactionMetrics = append(pb.TransactionMetrics, m) + } + + pb.ServiceTransactionMetrics = slices.Grow(pb.ServiceTransactionMetrics, len(m.ServiceTransactionGroups)) + for _, m := range m.ServiceTransactionGroups { + pb.ServiceTransactionMetrics = append(pb.ServiceTransactionMetrics, m) + } + + pb.SpanMetrics = slices.Grow(pb.SpanMetrics, len(m.SpanGroups)) + for _, m := range m.SpanGroups { + pb.SpanMetrics = append(pb.SpanMetrics, m) + } + + return &pb +} + +// ToProto converts TransactionAggregationKey to its protobuf representation. +func (k *transactionAggregationKey) ToProto() *aggregationpb.TransactionAggregationKey { + pb := aggregationpb.TransactionAggregationKey{} + pb.TraceRoot = k.TraceRoot + + pb.ContainerId = k.ContainerID + pb.KubernetesPodName = k.KubernetesPodName + + pb.ServiceVersion = k.ServiceVersion + pb.ServiceNodeName = k.ServiceNodeName + + pb.ServiceRuntimeName = k.ServiceRuntimeName + pb.ServiceRuntimeVersion = k.ServiceRuntimeVersion + pb.ServiceLanguageVersion = k.ServiceLanguageVersion + + pb.HostHostname = k.HostHostname + pb.HostName = k.HostName + pb.HostOsPlatform = k.HostOSPlatform + + pb.EventOutcome = k.EventOutcome + + pb.TransactionName = k.TransactionName + pb.TransactionType = k.TransactionType + pb.TransactionResult = k.TransactionResult + + pb.FaasColdstart = uint32(k.FAASColdstart) + pb.FaasId = k.FAASID + pb.FaasName = k.FAASName + pb.FaasVersion = k.FAASVersion + pb.FaasTriggerType = k.FAASTriggerType + + pb.CloudProvider = k.CloudProvider + pb.CloudRegion = k.CloudRegion + pb.CloudAvailabilityZone = k.CloudAvailabilityZone + pb.CloudServiceName = k.CloudServiceName + pb.CloudAccountId = k.CloudAccountID + pb.CloudAccountName = k.CloudAccountName + pb.CloudMachineType = k.CloudMachineType + pb.CloudProjectId = k.CloudProjectID + pb.CloudProjectName = k.CloudProjectName + return &pb +} + +// FromProto converts protobuf representation to TransactionAggregationKey. 
+func (k *transactionAggregationKey) FromProto(pb *aggregationpb.TransactionAggregationKey) { + k.TraceRoot = pb.TraceRoot + + k.ContainerID = pb.ContainerId + k.KubernetesPodName = pb.KubernetesPodName + + k.ServiceVersion = pb.ServiceVersion + k.ServiceNodeName = pb.ServiceNodeName + + k.ServiceRuntimeName = pb.ServiceRuntimeName + k.ServiceRuntimeVersion = pb.ServiceRuntimeVersion + k.ServiceLanguageVersion = pb.ServiceLanguageVersion + + k.HostHostname = pb.HostHostname + k.HostName = pb.HostName + k.HostOSPlatform = pb.HostOsPlatform + + k.EventOutcome = pb.EventOutcome + + k.TransactionName = pb.TransactionName + k.TransactionType = pb.TransactionType + k.TransactionResult = pb.TransactionResult + + k.FAASColdstart = nullable.Bool(pb.FaasColdstart) + k.FAASID = pb.FaasId + k.FAASName = pb.FaasName + k.FAASVersion = pb.FaasVersion + k.FAASTriggerType = pb.FaasTriggerType + + k.CloudProvider = pb.CloudProvider + k.CloudRegion = pb.CloudRegion + k.CloudAvailabilityZone = pb.CloudAvailabilityZone + k.CloudServiceName = pb.CloudServiceName + k.CloudAccountID = pb.CloudAccountId + k.CloudAccountName = pb.CloudAccountName + k.CloudMachineType = pb.CloudMachineType + k.CloudProjectID = pb.CloudProjectId + k.CloudProjectName = pb.CloudProjectName +} + +// ToProto converts ServiceTransactionAggregationKey to its protobuf representation. +func (k *serviceTransactionAggregationKey) ToProto() *aggregationpb.ServiceTransactionAggregationKey { + pb := aggregationpb.ServiceTransactionAggregationKey{} + pb.TransactionType = k.TransactionType + return &pb +} + +// FromProto converts protobuf representation to ServiceTransactionAggregationKey. +func (k *serviceTransactionAggregationKey) FromProto(pb *aggregationpb.ServiceTransactionAggregationKey) { + k.TransactionType = pb.TransactionType +} + +// ToProto converts SpanAggregationKey to its protobuf representation. +func (k *spanAggregationKey) ToProto() *aggregationpb.SpanAggregationKey { + pb := aggregationpb.SpanAggregationKey{} + pb.SpanName = k.SpanName + pb.Outcome = k.Outcome + + pb.TargetType = k.TargetType + pb.TargetName = k.TargetName + + pb.Resource = k.Resource + return &pb +} + +// FromProto converts protobuf representation to SpanAggregationKey. +func (k *spanAggregationKey) FromProto(pb *aggregationpb.SpanAggregationKey) { + k.SpanName = pb.SpanName + k.Outcome = pb.Outcome + + k.TargetType = pb.TargetType + k.TargetName = pb.TargetName + + k.Resource = pb.Resource +} + +// ToProto converts Overflow to its protobuf representation. +func (o *overflow) ToProto() *aggregationpb.Overflow { + pb := aggregationpb.Overflow{} + if !o.OverflowTransaction.Empty() { + pb.OverflowTransactions = o.OverflowTransaction.Metrics + pb.OverflowTransactionsEstimator = hllBytes(o.OverflowTransaction.Estimator) + } + if !o.OverflowServiceTransaction.Empty() { + pb.OverflowServiceTransactions = o.OverflowServiceTransaction.Metrics + pb.OverflowServiceTransactionsEstimator = hllBytes(o.OverflowServiceTransaction.Estimator) + } + if !o.OverflowSpan.Empty() { + pb.OverflowSpans = o.OverflowSpan.Metrics + pb.OverflowSpansEstimator = hllBytes(o.OverflowSpan.Estimator) + } + return &pb +} + +// FromProto converts protobuf representation to Overflow. 
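+// It moves, rather than copies, the overflow metric slices, clearing them
+// on the source message so that the two structures never share state.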
+func (o *overflow) FromProto(pb *aggregationpb.Overflow) {
+	if pb.OverflowTransactions != nil {
+		o.OverflowTransaction.Estimator = hllSketch(pb.OverflowTransactionsEstimator)
+		o.OverflowTransaction.Metrics = pb.OverflowTransactions
+		pb.OverflowTransactions = nil
+	}
+	if pb.OverflowServiceTransactions != nil {
+		o.OverflowServiceTransaction.Estimator = hllSketch(pb.OverflowServiceTransactionsEstimator)
+		o.OverflowServiceTransaction.Metrics = pb.OverflowServiceTransactions
+		pb.OverflowServiceTransactions = nil
+	}
+	if pb.OverflowSpans != nil {
+		o.OverflowSpan.Estimator = hllSketch(pb.OverflowSpansEstimator)
+		o.OverflowSpan.Metrics = pb.OverflowSpans
+		pb.OverflowSpans = nil
+	}
+}
+
+// ToProto converts GlobalLabels to its protobuf representation.
+func (gl *globalLabels) ToProto() *aggregationpb.GlobalLabels {
+	pb := aggregationpb.GlobalLabels{}
+
+	// Keys must be sorted to ensure wire formats are deterministically
+	// generated and strings are directly comparable, i.e. the protobuf
+	// encodings are equal if and only if the structs are equal.
+	pb.Labels = slices.Grow(pb.Labels, len(gl.Labels))[:len(gl.Labels)]
+	var i int
+	for k, v := range gl.Labels {
+		if pb.Labels[i] == nil {
+			pb.Labels[i] = &aggregationpb.Label{}
+		}
+		pb.Labels[i].Key = k
+		pb.Labels[i].Value = v.Value
+		pb.Labels[i].Values = slices.Grow(pb.Labels[i].Values, len(v.Values))[:len(v.Values)]
+		copy(pb.Labels[i].Values, v.Values)
+		i++
+	}
+	sort.Slice(pb.Labels, func(i, j int) bool {
+		return pb.Labels[i].Key < pb.Labels[j].Key
+	})
+
+	pb.NumericLabels = slices.Grow(pb.NumericLabels, len(gl.NumericLabels))[:len(gl.NumericLabels)]
+	i = 0
+	for k, v := range gl.NumericLabels {
+		if pb.NumericLabels[i] == nil {
+			pb.NumericLabels[i] = &aggregationpb.NumericLabel{}
+		}
+		pb.NumericLabels[i].Key = k
+		pb.NumericLabels[i].Value = v.Value
+		pb.NumericLabels[i].Values = slices.Grow(pb.NumericLabels[i].Values, len(v.Values))[:len(v.Values)]
+		copy(pb.NumericLabels[i].Values, v.Values)
+		i++
+	}
+	sort.Slice(pb.NumericLabels, func(i, j int) bool {
+		return pb.NumericLabels[i].Key < pb.NumericLabels[j].Key
+	})
+
+	return &pb
+}
+
+// FromProto converts protobuf representation to globalLabels.
+func (gl *globalLabels) FromProto(pb *aggregationpb.GlobalLabels) {
+	gl.Labels = make(modelpb.Labels, len(pb.Labels))
+	for _, l := range pb.Labels {
+		gl.Labels[l.Key] = &modelpb.LabelValue{Value: l.Value, Global: true}
+		gl.Labels[l.Key].Values = slices.Grow(gl.Labels[l.Key].Values, len(l.Values))[:len(l.Values)]
+		copy(gl.Labels[l.Key].Values, l.Values)
+	}
+	gl.NumericLabels = make(modelpb.NumericLabels, len(pb.NumericLabels))
+	for _, l := range pb.NumericLabels {
+		gl.NumericLabels[l.Key] = &modelpb.NumericLabelValue{Value: l.Value, Global: true}
+		gl.NumericLabels[l.Key].Values = slices.Grow(gl.NumericLabels[l.Key].Values, len(l.Values))[:len(l.Values)]
+		copy(gl.NumericLabels[l.Key].Values, l.Values)
+	}
+}
+
+// MarshalBinary marshals globalLabels to binary using protobuf.
+func (gl *globalLabels) MarshalBinary() ([]byte, error) {
+	if gl.Labels == nil && gl.NumericLabels == nil {
+		return nil, nil
+	}
+	pb := gl.ToProto()
+	return pb.MarshalVT()
+}
+
+// MarshalString marshals globalLabels to a string of its binary protobuf
+// encoding.
+func (gl *globalLabels) MarshalString() (string, error) {
+	b, err := gl.MarshalBinary()
+	return string(b), err
+}
+
+// UnmarshalBinary unmarshals binary protobuf to globalLabels.
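+// A zero-length input yields nil label maps, mirroring MarshalBinary, which
+// returns nil when both label maps are nil:
+//
+//	var gl globalLabels
+//	_ = gl.UnmarshalBinary(nil) // gl.Labels == nil && gl.NumericLabels == nil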
+func (gl *globalLabels) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + gl.Labels = nil + gl.NumericLabels = nil + return nil + } + pb := aggregationpb.GlobalLabels{} + if err := pb.UnmarshalVT(data); err != nil { + return err + } + gl.FromProto(&pb) + return nil +} + +// UnmarshalString unmarshals string of binary protobuf to globalLabels. +func (gl *globalLabels) UnmarshalString(data string) error { + return gl.UnmarshalBinary([]byte(data)) +} + +func histogramFromProto(h *hdrhistogram.HistogramRepresentation, pb *aggregationpb.HDRHistogram) { + if pb == nil { + return + } + h.LowestTrackableValue = pb.LowestTrackableValue + h.HighestTrackableValue = pb.HighestTrackableValue + h.SignificantFigures = pb.SignificantFigures + h.CountsRep.Reset() + + for i := 0; i < len(pb.Buckets); i++ { + h.CountsRep.Add(pb.Buckets[i], pb.Counts[i]) + } +} + +func histogramToProto(h *hdrhistogram.HistogramRepresentation) *aggregationpb.HDRHistogram { + if h == nil { + return nil + } + pb := aggregationpb.HDRHistogram{} + setHistogramProto(h, &pb) + return &pb +} + +func setHistogramProto(h *hdrhistogram.HistogramRepresentation, pb *aggregationpb.HDRHistogram) { + pb.LowestTrackableValue = h.LowestTrackableValue + pb.HighestTrackableValue = h.HighestTrackableValue + pb.SignificantFigures = h.SignificantFigures + pb.Buckets = pb.Buckets[:0] + pb.Counts = pb.Counts[:0] + countsLen := h.CountsRep.Len() + if countsLen > cap(pb.Buckets) { + pb.Buckets = make([]int32, 0, countsLen) + } + if countsLen > cap(pb.Counts) { + pb.Counts = make([]int64, 0, countsLen) + } + h.CountsRep.ForEach(func(bucket int32, count int64) { + pb.Buckets = append(pb.Buckets, bucket) + pb.Counts = append(pb.Counts, count) + }) +} + +func hllBytes(estimator *hyperloglog.Sketch) []byte { + if estimator == nil { + return nil + } + // Ignoring error here since error will always be nil + b, _ := estimator.MarshalBinary() + return b +} + +// hllSketchEstimate returns hllSketch(estimator).Estimate() if estimator is +// non-nil, and zero if estimator is nil. +func hllSketchEstimate(estimator []byte) uint64 { + if sketch := hllSketch(estimator); sketch != nil { + return sketch.Estimate() + } + return 0 +} + +func hllSketch(estimator []byte) *hyperloglog.Sketch { + if len(estimator) == 0 { + return nil + } + var sketch hyperloglog.Sketch + // Ignoring returned error here since the error is only returned if + // the precision is set outside bounds which is not possible for our case. + sketch.UnmarshalBinary(estimator) + return &sketch +} diff --git a/copy/apm-aggregation/aggregators/codec_test.go b/copy/apm-aggregation/aggregators/codec_test.go new file mode 100644 index 00000000000..2bfe8a548ef --- /dev/null +++ b/copy/apm-aggregation/aggregators/codec_test.go @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package aggregators + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + + "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" + "github.com/elastic/apm-data/model/modelpb" +) + +func TestCombinedMetricsKey(t *testing.T) { + expected := CombinedMetricsKey{ + Interval: time.Minute, + ProcessingTime: time.Now().Truncate(time.Minute), + ID: EncodeToCombinedMetricsKeyID(t, "ab01"), + } + data := make([]byte, CombinedMetricsKeyEncodedSize) + assert.NoError(t, expected.MarshalBinaryToSizedBuffer(data)) + var actual CombinedMetricsKey + assert.NoError(t, (&actual).UnmarshalBinary(data)) + assert.Empty(t, cmp.Diff(expected, actual)) +} + +func TestGetEncodedCombinedMetricsKeyWithoutPartitionID(t *testing.T) { + key := CombinedMetricsKey{ + Interval: time.Minute, + ProcessingTime: time.Now().Truncate(time.Minute), + ID: EncodeToCombinedMetricsKeyID(t, "ab01"), + PartitionID: 11, + } + var encoded [CombinedMetricsKeyEncodedSize]byte + assert.NoError(t, key.MarshalBinaryToSizedBuffer(encoded[:])) + + key.PartitionID = 0 + var expected [CombinedMetricsKeyEncodedSize]byte + assert.NoError(t, key.MarshalBinaryToSizedBuffer(expected[:])) + + assert.Equal( + t, + expected[:], + GetEncodedCombinedMetricsKeyWithoutPartitionID(encoded[:]), + ) +} + +func TestGlobalLabels(t *testing.T) { + expected := globalLabels{ + Labels: map[string]*modelpb.LabelValue{ + "lb01": { + Values: []string{"test01", "test02"}, + Global: true, + }, + }, + NumericLabels: map[string]*modelpb.NumericLabelValue{ + "nlb01": { + Values: []float64{0.1, 0.2}, + Global: true, + }, + }, + } + str, err := expected.MarshalString() + assert.NoError(t, err) + var actual globalLabels + assert.NoError(t, actual.UnmarshalString(str)) + assert.Empty(t, cmp.Diff( + expected, actual, + cmpopts.IgnoreUnexported( + modelpb.LabelValue{}, + modelpb.NumericLabelValue{}, + ), + )) +} + +func TestHistogramRepresentation(t *testing.T) { + expected := hdrhistogram.New() + expected.RecordDuration(time.Minute, 2) + + actual := hdrhistogram.New() + histogramFromProto(actual, histogramToProto(expected)) + assert.Empty(t, cmp.Diff( + expected, actual, + cmp.Comparer(func(a, b hdrhistogram.HybridCountsRep) bool { + return a.Equal(&b) + }), + )) +} + +func BenchmarkCombinedMetricsEncoding(b *testing.B) { + b.ReportAllocs() + ts := time.Now() + cardinality := 10 + tcm := NewTestCombinedMetrics() + sm := tcm.AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts, + ServiceName: "bench", + }) + for i := 0; i < cardinality; i++ { + txnName := fmt.Sprintf("txn%d", i) + txnType := fmt.Sprintf("typ%d", i) + spanName := fmt.Sprintf("spn%d", i) + + sm.AddTransaction(transactionAggregationKey{ + TransactionName: txnName, + TransactionType: txnType, + }, WithTransactionCount(200)) + sm.AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: txnType, + }, WithTransactionCount(200)) + sm.AddSpan(spanAggregationKey{ + SpanName: spanName, + }) + } + cm := tcm.Get() + b.ResetTimer() + for i := 0; i < b.N; i++ { + cmproto := cm.ToProto() + cmproto.ReturnToVTPool() + } +} + +func EncodeToCombinedMetricsKeyID(tb testing.TB, s string) [16]byte { + var b [16]byte + if len(s) > len(b) { + tb.Fatal("invalid key length passed") + } + copy(b[len(b)-len(s):], s) + return b +} diff --git a/copy/apm-aggregation/aggregators/combined_metrics_test.go b/copy/apm-aggregation/aggregators/combined_metrics_test.go new file mode 100644 index 
00000000000..3ad4dfe8aa8
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/combined_metrics_test.go
@@ -0,0 +1,410 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package aggregators
+
+import (
+	"time"
+
+	"github.com/cespare/xxhash/v2"
+	"github.com/google/go-cmp/cmp"
+	"google.golang.org/protobuf/testing/protocmp"
+
+	"github.com/elastic/apm-aggregation/aggregationpb"
+	"github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram"
+	"github.com/elastic/apm-aggregation/aggregators/internal/protohash"
+	"github.com/elastic/apm-data/model/modelpb"
+)
+
+type TestCombinedMetricsCfg struct {
+	key                    CombinedMetricsKey
+	eventsTotal            float64
+	youngestEventTimestamp time.Time
+}
+
+type TestCombinedMetricsOpt func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg
+
+func WithKey(key CombinedMetricsKey) TestCombinedMetricsOpt {
+	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
+		cfg.key = key
+		return cfg
+	}
+}
+
+func WithEventsTotal(total float64) TestCombinedMetricsOpt {
+	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
+		cfg.eventsTotal = total
+		return cfg
+	}
+}
+
+func WithYoungestEventTimestamp(ts time.Time) TestCombinedMetricsOpt {
+	return func(cfg TestCombinedMetricsCfg) TestCombinedMetricsCfg {
+		cfg.youngestEventTimestamp = ts
+		return cfg
+	}
+}
+
+var defaultTestCombinedMetricsCfg = TestCombinedMetricsCfg{
+	eventsTotal:            1,
+	youngestEventTimestamp: time.Unix(0, 0).UTC(),
+}
+
+type TestTransactionCfg struct {
+	duration time.Duration
+	count    int
+	// outcome is used for service transactions as transactions already
+	// have `EventOutcome` in their key. For transactions this field
+	// will automatically be overridden based on the key value.
+	outcome string
+}
+
+type TestTransactionOpt func(TestTransactionCfg) TestTransactionCfg
+
+func WithTransactionDuration(d time.Duration) TestTransactionOpt {
+	return func(cfg TestTransactionCfg) TestTransactionCfg {
+		cfg.duration = d
+		return cfg
+	}
+}
+
+func WithTransactionCount(c int) TestTransactionOpt {
+	return func(cfg TestTransactionCfg) TestTransactionCfg {
+		cfg.count = c
+		return cfg
+	}
+}
+
+// WithEventOutcome is used to specify the event outcome for building
+// test service transaction metrics. If it is specified for building
+// test transaction metrics then it will be overridden based on the
+// `EventOutcome` in the transaction aggregation key.
+func WithEventOutcome(o string) TestTransactionOpt {
+	return func(cfg TestTransactionCfg) TestTransactionCfg {
+		cfg.outcome = o
+		return cfg
+	}
+}
+
+var defaultTestTransactionCfg = TestTransactionCfg{
+	duration: time.Second,
+	count:    1,
+	outcome:  "success",
+}
+
+type TestSpanCfg struct {
+	duration time.Duration
+	count    int
+}
+
+type TestSpanOpt func(TestSpanCfg) TestSpanCfg
+
+func WithSpanDuration(d time.Duration) TestSpanOpt {
+	return func(cfg TestSpanCfg) TestSpanCfg {
+		cfg.duration = d
+		return cfg
+	}
+}
+
+func WithSpanCount(c int) TestSpanOpt {
+	return func(cfg TestSpanCfg) TestSpanCfg {
+		cfg.count = c
+		return cfg
+	}
+}
+
+var defaultTestSpanCfg = TestSpanCfg{
+	duration: time.Nanosecond, // for backward compatibility with previous tests
+	count:    1,
+}
+
+// TestCombinedMetrics creates combined metrics for testing.
+// The creation logic is arranged to allow chained creation and addition of
+// leaf nodes to the combined metrics.
+type TestCombinedMetrics struct {
+	key   CombinedMetricsKey
+	value *combinedMetrics
+}
+
+func NewTestCombinedMetrics(opts ...TestCombinedMetricsOpt) *TestCombinedMetrics {
+	cfg := defaultTestCombinedMetricsCfg
+	for _, opt := range opts {
+		cfg = opt(cfg)
+	}
+	var cm combinedMetrics
+	cm.EventsTotal = cfg.eventsTotal
+	cm.YoungestEventTimestamp = modelpb.FromTime(cfg.youngestEventTimestamp)
+	cm.Services = make(map[serviceAggregationKey]serviceMetrics)
+	return &TestCombinedMetrics{
+		key:   cfg.key,
+		value: &cm,
+	}
+}
+
+func (tcm *TestCombinedMetrics) GetProto() *aggregationpb.CombinedMetrics {
+	return tcm.value.ToProto()
+}
+
+func (tcm *TestCombinedMetrics) Get() combinedMetrics {
+	return *tcm.value
+}
+
+func (tcm *TestCombinedMetrics) GetKey() CombinedMetricsKey {
+	return tcm.key
+}
+
+type TestServiceMetrics struct {
+	sk       serviceAggregationKey
+	tcm      *TestCombinedMetrics
+	overflow bool // indicates if the service has overflowed to global
+}
+
+func (tcm *TestCombinedMetrics) AddServiceMetrics(
+	sk serviceAggregationKey,
+) *TestServiceMetrics {
+	if _, ok := tcm.value.Services[sk]; !ok {
+		tcm.value.Services[sk] = newServiceMetrics()
+	}
+	return &TestServiceMetrics{sk: sk, tcm: tcm}
+}
+
+func (tcm *TestCombinedMetrics) AddServiceMetricsOverflow(
+	sk serviceAggregationKey,
+) *TestServiceMetrics {
+	if _, ok := tcm.value.Services[sk]; ok {
+		panic("service already added as non overflow")
+	}
+
+	hash := protohash.HashServiceAggregationKey(xxhash.Digest{}, sk.ToProto())
+	insertHash(&tcm.value.OverflowServicesEstimator, hash.Sum64())
+
+	// Does not save to a map; any service instance added to this will
+	// automatically be overflowed to the global overflow bucket.
+ return &TestServiceMetrics{sk: sk, tcm: tcm, overflow: true} +} + +func (tsm *TestServiceMetrics) AddTransaction( + tk transactionAggregationKey, + opts ...TestTransactionOpt, +) *TestServiceMetrics { + if tsm.overflow { + panic("cannot add transaction to overflowed service transaction") + } + cfg := defaultTestTransactionCfg + for _, opt := range opts { + cfg = opt(cfg) + } + cfg.outcome = tk.EventOutcome + + hdr := hdrhistogram.New() + hdr.RecordDuration(cfg.duration, float64(cfg.count)) + ktm := aggregationpb.KeyedTransactionMetricsFromVTPool() + ktm.Key = tk.ToProto() + ktm.Metrics = aggregationpb.TransactionMetricsFromVTPool() + ktm.Metrics.Histogram = histogramToProto(hdr) + + svc := tsm.tcm.value.Services[tsm.sk] + if oldKtm, ok := svc.TransactionGroups[tk]; ok { + mergeKeyedTransactionMetrics(oldKtm, ktm) + ktm = oldKtm + } + svc.TransactionGroups[tk] = ktm + return tsm +} + +func (tsm *TestServiceMetrics) AddTransactionOverflow( + tk transactionAggregationKey, + opts ...TestTransactionOpt, +) *TestServiceMetrics { + cfg := defaultTestTransactionCfg + for _, opt := range opts { + cfg = opt(cfg) + } + cfg.outcome = tk.EventOutcome + + hdr := hdrhistogram.New() + hdr.RecordDuration(cfg.duration, float64(cfg.count)) + from := aggregationpb.TransactionMetricsFromVTPool() + from.Histogram = histogramToProto(hdr) + + hash := protohash.HashTransactionAggregationKey( + protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()), + tk.ToProto(), + ) + if tsm.overflow { + // Global overflow + tsm.tcm.value.OverflowServices.OverflowTransaction.Merge(from, hash.Sum64()) + } else { + // Per service overflow + svc := tsm.tcm.value.Services[tsm.sk] + svc.OverflowGroups.OverflowTransaction.Merge(from, hash.Sum64()) + tsm.tcm.value.Services[tsm.sk] = svc + } + return tsm +} + +func (tsm *TestServiceMetrics) AddServiceTransaction( + stk serviceTransactionAggregationKey, + opts ...TestTransactionOpt, +) *TestServiceMetrics { + cfg := defaultTestTransactionCfg + for _, opt := range opts { + cfg = opt(cfg) + } + + hdr := hdrhistogram.New() + hdr.RecordDuration(cfg.duration, float64(cfg.count)) + kstm := aggregationpb.KeyedServiceTransactionMetricsFromVTPool() + kstm.Key = stk.ToProto() + kstm.Metrics = aggregationpb.ServiceTransactionMetricsFromVTPool() + kstm.Metrics.Histogram = histogramToProto(hdr) + switch cfg.outcome { + case "failure": + kstm.Metrics.FailureCount = float64(cfg.count) + case "success": + kstm.Metrics.SuccessCount = float64(cfg.count) + } + + svc := tsm.tcm.value.Services[tsm.sk] + if oldKstm, ok := svc.ServiceTransactionGroups[stk]; ok { + mergeKeyedServiceTransactionMetrics(oldKstm, kstm) + kstm = oldKstm + } + svc.ServiceTransactionGroups[stk] = kstm + return tsm +} + +func (tsm *TestServiceMetrics) AddServiceTransactionOverflow( + stk serviceTransactionAggregationKey, + opts ...TestTransactionOpt, +) *TestServiceMetrics { + cfg := defaultTestTransactionCfg + for _, opt := range opts { + cfg = opt(cfg) + } + + hdr := hdrhistogram.New() + hdr.RecordDuration(cfg.duration, float64(cfg.count)) + from := aggregationpb.ServiceTransactionMetricsFromVTPool() + from.Histogram = histogramToProto(hdr) + switch cfg.outcome { + case "failure": + from.FailureCount = float64(cfg.count) + case "success": + from.SuccessCount = float64(cfg.count) + } + + hash := protohash.HashServiceTransactionAggregationKey( + protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()), + stk.ToProto(), + ) + if tsm.overflow { + // Global overflow + 
tsm.tcm.value.OverflowServices.OverflowServiceTransaction.Merge(from, hash.Sum64()) + } else { + // Per service overflow + svc := tsm.tcm.value.Services[tsm.sk] + svc.OverflowGroups.OverflowServiceTransaction.Merge(from, hash.Sum64()) + tsm.tcm.value.Services[tsm.sk] = svc + } + return tsm +} + +func (tsm *TestServiceMetrics) AddSpan( + spk spanAggregationKey, + opts ...TestSpanOpt, +) *TestServiceMetrics { + cfg := defaultTestSpanCfg + for _, opt := range opts { + cfg = opt(cfg) + } + + ksm := aggregationpb.KeyedSpanMetricsFromVTPool() + ksm.Key = spk.ToProto() + ksm.Metrics = aggregationpb.SpanMetricsFromVTPool() + ksm.Metrics.Sum += float64(cfg.duration * time.Duration(cfg.count)) + ksm.Metrics.Count += float64(cfg.count) + + svc := tsm.tcm.value.Services[tsm.sk] + if oldKsm, ok := svc.SpanGroups[spk]; ok { + mergeKeyedSpanMetrics(oldKsm, ksm) + ksm = oldKsm + } + svc.SpanGroups[spk] = ksm + return tsm +} + +func (tsm *TestServiceMetrics) AddSpanOverflow( + spk spanAggregationKey, + opts ...TestSpanOpt, +) *TestServiceMetrics { + cfg := defaultTestSpanCfg + for _, opt := range opts { + cfg = opt(cfg) + } + + from := aggregationpb.SpanMetricsFromVTPool() + from.Sum += float64(cfg.duration * time.Duration(cfg.count)) + from.Count += float64(cfg.count) + + hash := protohash.HashSpanAggregationKey( + protohash.HashServiceAggregationKey(xxhash.Digest{}, tsm.sk.ToProto()), + spk.ToProto(), + ) + if tsm.overflow { + // Global overflow + tsm.tcm.value.OverflowServices.OverflowSpan.Merge(from, hash.Sum64()) + } else { + // Per service overflow + svc := tsm.tcm.value.Services[tsm.sk] + svc.OverflowGroups.OverflowSpan.Merge(from, hash.Sum64()) + tsm.tcm.value.Services[tsm.sk] = svc + } + return tsm +} + +func (tsm *TestServiceMetrics) GetProto() *aggregationpb.CombinedMetrics { + return tsm.tcm.GetProto() +} + +func (tsm *TestServiceMetrics) Get() combinedMetrics { + return tsm.tcm.Get() +} + +func (tsm *TestServiceMetrics) GetTest() *TestCombinedMetrics { + return tsm.tcm +} + +// Set of cmp options to sort combined metrics based on key hash. Hash collisions +// are not considered. +var combinedMetricsSliceSorters = []cmp.Option{ + protocmp.SortRepeated(func(a, b *aggregationpb.KeyedServiceMetrics) bool { + return xxhashDigestLess( + protohash.HashServiceAggregationKey(xxhash.Digest{}, a.Key), + protohash.HashServiceAggregationKey(xxhash.Digest{}, b.Key), + ) + }), + protocmp.SortRepeated(func(a, b *aggregationpb.KeyedTransactionMetrics) bool { + return xxhashDigestLess( + protohash.HashTransactionAggregationKey(xxhash.Digest{}, a.Key), + protohash.HashTransactionAggregationKey(xxhash.Digest{}, b.Key), + ) + }), + protocmp.SortRepeated(func(a, b *aggregationpb.KeyedServiceTransactionMetrics) bool { + return xxhashDigestLess( + protohash.HashServiceTransactionAggregationKey(xxhash.Digest{}, a.Key), + protohash.HashServiceTransactionAggregationKey(xxhash.Digest{}, b.Key), + ) + }), + protocmp.SortRepeated(func(a, b *aggregationpb.KeyedSpanMetrics) bool { + return xxhashDigestLess( + protohash.HashSpanAggregationKey(xxhash.Digest{}, a.Key), + protohash.HashSpanAggregationKey(xxhash.Digest{}, b.Key), + ) + }), +} + +func xxhashDigestLess(a, b xxhash.Digest) bool { + return a.Sum64() < b.Sum64() +} diff --git a/copy/apm-aggregation/aggregators/config.go b/copy/apm-aggregation/aggregators/config.go new file mode 100644 index 00000000000..da719e47364 --- /dev/null +++ b/copy/apm-aggregation/aggregators/config.go @@ -0,0 +1,279 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package aggregators
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"github.com/elastic/apm-aggregation/aggregationpb"
+)
+
+const instrumentationName = "aggregators"
+
+// Processor defines handling of the aggregated metrics post harvest.
+// CombinedMetrics passed to the processor is pooled and it is released
+// back to the pool after the processor has returned. If the processor
+// mutates the CombinedMetrics such that it can no longer access the
+// pooled objects, then the Processor should release the objects back
+// to the pool.
+type Processor func(
+	ctx context.Context,
+	cmk CombinedMetricsKey,
+	cm *aggregationpb.CombinedMetrics,
+	aggregationIvl time.Duration,
+) error
+
+// config contains the required config for running the aggregator.
+type config struct {
+	DataDir                string
+	Limits                 Limits
+	Processor              Processor
+	Partitions             uint16
+	AggregationIntervals   []time.Duration
+	HarvestDelay           time.Duration
+	Lookback               time.Duration
+	CombinedMetricsIDToKVs func([16]byte) []attribute.KeyValue
+	InMemory               bool
+
+	Meter           metric.Meter
+	Tracer          trace.Tracer
+	Logger          *zap.Logger
+	OverflowLogging bool
+}
+
+// Option allows configuring the aggregator based on functional options.
+type Option func(config) config
+
+// newConfig creates a new aggregator config based on the passed options.
+func newConfig(opts ...Option) (config, error) {
+	cfg := defaultCfg()
+	for _, opt := range opts {
+		cfg = opt(cfg)
+	}
+	return cfg, validateCfg(cfg)
+}
+
+// WithDataDir configures the data directory to be used by the database.
+func WithDataDir(dataDir string) Option {
+	return func(c config) config {
+		c.DataDir = dataDir
+		return c
+	}
+}
+
+// WithLimits configures the limits to be used by the aggregator.
+func WithLimits(limits Limits) Option {
+	return func(c config) config {
+		c.Limits = limits
+		return c
+	}
+}
+
+// WithProcessor configures the processor for handling of the aggregated
+// metrics post harvest. Processor is called for each decoded combined
+// metrics after they are harvested. CombinedMetrics passed to the
+// processor is pooled and it is released back to the pool after the
+// processor has returned. If the processor mutates the CombinedMetrics
+// such that it can no longer access the pooled objects, then the
+// Processor should release the objects back to the pool.
+func WithProcessor(processor Processor) Option {
+	return func(c config) config {
+		c.Processor = processor
+		return c
+	}
+}
+
+// WithPartitions configures the number of partitions for combined metrics
+// written to pebble. Defaults to 1.
+//
+// Partition IDs are encoded such that all the partitions of a specific
+// combined metric are listed before any other when compared using the
+// bytes comparer.
+func WithPartitions(n uint16) Option {
+	return func(c config) config {
+		c.Partitions = n
+		return c
+	}
+}
+
+// WithAggregationIntervals defines the intervals that the aggregator will
+// aggregate for.
+func WithAggregationIntervals(aggIvls []time.Duration) Option {
+	return func(c config) config {
+		c.AggregationIntervals = aggIvls
+		return c
+	}
+}
+
+// WithHarvestDelay delays the harvest by the configured duration.
+// This means that harvest for a specific processing time would be
+// performed with the given delay.
+//
+// Without delay, a normal harvest schedule will harvest metrics
+// aggregated for processing time, say `t0`, at time `t1`, where
+// `t1 = t0 + aggregation_interval`. With a delay of, say, `d`, the
+// harvester will harvest the metrics for `t0` at `t1 + d`. In
+// addition to the harvest, the duration for which the metrics are
+// aggregated by the AggregateBatch API will also be affected.
+//
+// The main purpose of the delay is to handle the latency of
+// receiving the l1 aggregated metrics in l2 aggregation. Thus
+// the value must be configured for the l2 aggregator and is
+// not required for the l1 aggregator. If used as such, the
+// harvest delay has no effect on the duration for which the
+// metrics are aggregated, because the AggregateBatch API is
+// not used by the l2 aggregator.
+func WithHarvestDelay(delay time.Duration) Option {
+	return func(c config) config {
+		c.HarvestDelay = delay
+		return c
+	}
+}
+
+// WithLookback configures the maximum duration that the
+// aggregator will use to query the database during harvest time
+// in addition to the original period derived from the aggregation
+// interval, i.e. the harvest interval for each aggregation interval
+// will be defined as [end-Lookback-AggregationIvl, end).
+//
+// The main purpose of Lookback is to protect against data loss for
+// multi-level deployments of aggregators where AggregateCombinedMetrics
+// is used to aggregate partial aggregates. In these cases, the
+// Lookback configuration can protect against data loss due to
+// delayed partial aggregates. Note that these delayed partial
+// aggregates will only be aggregated with other delayed partial
+// aggregates and thus we can have multiple aggregated metrics for
+// the same CombinedMetricsKey{Interval, ProcessingTime, ID}.
+func WithLookback(lookback time.Duration) Option {
+	return func(c config) config {
+		c.Lookback = lookback
+		return c
+	}
+}
+
+// WithMeter defines a custom meter which will be used for collecting
+// telemetry. Defaults to the meter provided by the global provider.
+func WithMeter(meter metric.Meter) Option {
+	return func(c config) config {
+		c.Meter = meter
+		return c
+	}
+}
+
+// WithTracer defines a custom tracer which will be used for collecting
+// traces. Defaults to the tracer provided by the global provider.
+func WithTracer(tracer trace.Tracer) Option {
+	return func(c config) config {
+		c.Tracer = tracer
+		return c
+	}
+}
+
+// WithCombinedMetricsIDToKVs defines a function that converts a combined
+// metrics ID to zero or more attribute.KeyValue for telemetry.
+func WithCombinedMetricsIDToKVs(f func([16]byte) []attribute.KeyValue) Option {
+	return func(c config) config {
+		c.CombinedMetricsIDToKVs = f
+		return c
+	}
+}
+
+// WithLogger defines a custom logger to be used by the aggregator.
+func WithLogger(logger *zap.Logger) Option {
+	return func(c config) config {
+		c.Logger = logger
+		return c
+	}
+}
+
+// WithOverflowLogging enables warning logs at harvest time, when overflows have occurred.
+//
+// Logging of overflows is disabled by default, as most callers are expected to rely on
+// metrics to surface cardinality issues. Support for logging exists for historical reasons.
+func WithOverflowLogging(enabled bool) Option {
+	return func(c config) config {
+		c.OverflowLogging = enabled
+		return c
+	}
+}
+
+// WithInMemory defines whether the aggregator uses an in-memory file system.
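+//
+// A hedged usage sketch (assuming the New constructor defined in
+// aggregator.go, which is part of this package):
+//
+//	agg, err := New(WithInMemory(true), WithDataDir("/tmp"))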
+func WithInMemory(enabled bool) Option {
+	return func(c config) config {
+		c.InMemory = enabled
+		return c
+	}
+}
+
+func defaultCfg() config {
+	return config{
+		DataDir:                "/tmp",
+		Processor:              stdoutProcessor,
+		Partitions:             1,
+		AggregationIntervals:   []time.Duration{time.Minute},
+		Meter:                  otel.Meter(instrumentationName),
+		Tracer:                 otel.Tracer(instrumentationName),
+		CombinedMetricsIDToKVs: func(_ [16]byte) []attribute.KeyValue { return nil },
+		Logger:                 zap.Must(zap.NewDevelopment()),
+	}
+}
+
+func validateCfg(cfg config) error {
+	if cfg.DataDir == "" {
+		return errors.New("data directory is required")
+	}
+	if cfg.Processor == nil {
+		return errors.New("processor is required")
+	}
+	if cfg.Partitions == 0 {
+		return errors.New("partitions must be greater than zero")
+	}
+	if len(cfg.AggregationIntervals) == 0 {
+		return errors.New("at least one aggregation interval is required")
+	}
+	if !sort.SliceIsSorted(cfg.AggregationIntervals, func(i, j int) bool {
+		return cfg.AggregationIntervals[i] < cfg.AggregationIntervals[j]
+	}) {
+		return errors.New("aggregation intervals must be in ascending order")
+	}
+	lowest := cfg.AggregationIntervals[0]
+	highest := cfg.AggregationIntervals[len(cfg.AggregationIntervals)-1]
+	for i := 1; i < len(cfg.AggregationIntervals); i++ {
+		ivl := cfg.AggregationIntervals[i]
+		if ivl%lowest != 0 {
+			return errors.New("aggregation intervals must be a factor of lowest interval")
+		}
+	}
+	// For encoding/decoding the processing time for combined metrics we only
+	// consider seconds granularity, making 1 sec the lowest possible
+	// aggregation interval. We also encode the interval as 2 unsigned bytes,
+	// making 65535 seconds (~18 hours) the highest possible aggregation interval.
+	if lowest < time.Second {
+		return errors.New("aggregation interval less than one second is not supported")
+	}
+	if highest > 18*time.Hour {
+		return errors.New("aggregation interval greater than 18 hours is not supported")
+	}
+	return nil
+}
+
+func stdoutProcessor(
+	ctx context.Context,
+	cmk CombinedMetricsKey,
+	_ *aggregationpb.CombinedMetrics,
+	_ time.Duration,
+) error {
+	fmt.Printf("Received combined metrics with key: %+v\n", cmk)
+	return nil
+}
diff --git a/copy/apm-aggregation/aggregators/config_test.go b/copy/apm-aggregation/aggregators/config_test.go
new file mode 100644
index 00000000000..bfb46dff26e
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/config_test.go
@@ -0,0 +1,198 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+ +package aggregators + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/trace" +) + +func TestNewConfig(t *testing.T) { + defaultCfg := defaultCfg() + customMeter := metric.NewMeterProvider().Meter("test") + customTracer := trace.NewTracerProvider().Tracer("test") + for _, tc := range []struct { + name string + opts []Option + expected func() config + expectedErrorMsg string + }{ + { + name: "empty", + opts: nil, + expected: func() config { + return defaultCfg + }, + }, + { + name: "with_data_dir", + opts: []Option{ + WithDataDir("/test"), + }, + expected: func() config { + cfg := defaultCfg + cfg.DataDir = "/test" + return cfg + }, + }, + { + name: "with_limits", + opts: []Option{ + WithLimits(Limits{ + MaxServices: 10, + MaxSpanGroups: 10, + MaxSpanGroupsPerService: 10, + MaxTransactionGroups: 10, + MaxTransactionGroupsPerService: 10, + MaxServiceTransactionGroups: 10, + MaxServiceTransactionGroupsPerService: 10, + }), + }, + expected: func() config { + cfg := defaultCfg + cfg.Limits = Limits{ + MaxServices: 10, + MaxSpanGroups: 10, + MaxSpanGroupsPerService: 10, + MaxTransactionGroups: 10, + MaxTransactionGroupsPerService: 10, + MaxServiceTransactionGroups: 10, + MaxServiceTransactionGroupsPerService: 10, + } + return cfg + }, + }, + { + name: "with_aggregation_intervals", + opts: []Option{ + WithAggregationIntervals([]time.Duration{time.Minute, time.Hour}), + }, + expected: func() config { + cfg := defaultCfg + cfg.AggregationIntervals = []time.Duration{time.Minute, time.Hour} + return cfg + }, + }, + { + name: "with_harvest_delay", + opts: []Option{ + WithHarvestDelay(time.Hour), + }, + expected: func() config { + cfg := defaultCfg + cfg.HarvestDelay = time.Hour + return cfg + }, + }, + { + name: "with_lookback", + opts: []Option{ + WithLookback(time.Hour), + }, + expected: func() config { + cfg := defaultCfg + cfg.Lookback = time.Hour + return cfg + }, + }, + { + name: "with_meter", + opts: []Option{ + WithMeter(customMeter), + }, + expected: func() config { + cfg := defaultCfg + cfg.Meter = customMeter + return cfg + }, + }, + { + name: "with_tracer", + opts: []Option{ + WithTracer(customTracer), + }, + expected: func() config { + cfg := defaultCfg + cfg.Tracer = customTracer + return cfg + }, + }, + { + name: "with_empty_data_dir", + opts: []Option{ + WithDataDir(""), + }, + expectedErrorMsg: "data directory is required", + }, + { + name: "with_nil_processor", + opts: []Option{ + WithProcessor(nil), + }, + expectedErrorMsg: "processor is required", + }, + { + name: "with_no_aggregation_interval", + opts: []Option{ + WithAggregationIntervals(nil), + }, + expectedErrorMsg: "at least one aggregation interval is required", + }, + { + name: "with_unsorted_aggregation_intervals", + opts: []Option{ + WithAggregationIntervals([]time.Duration{time.Hour, time.Minute}), + }, + expectedErrorMsg: "aggregation intervals must be in ascending order", + }, + { + name: "with_invalid_aggregation_intervals", + opts: []Option{ + WithAggregationIntervals([]time.Duration{10 * time.Second, 15 * time.Second}), + }, + expectedErrorMsg: "aggregation intervals must be a factor of lowest interval", + }, + { + name: "with_out_of_lower_range_aggregation_interval", + opts: []Option{ + WithAggregationIntervals([]time.Duration{time.Millisecond}), + }, + expectedErrorMsg: "aggregation interval less than one second is not supported", + }, + { + name: "with_out_of_upper_range_aggregation_interval", + opts: []Option{ + 
WithAggregationIntervals([]time.Duration{20 * time.Hour}), + }, + expectedErrorMsg: "aggregation interval greater than 18 hours is not supported", + }, + } { + actual, err := newConfig(tc.opts...) + + if tc.expectedErrorMsg != "" { + assert.EqualError(t, err, tc.expectedErrorMsg) + continue + } + + expected := tc.expected() + assert.NoError(t, err) + + // New logger is created for every call + assert.NotNil(t, actual.Logger) + actual.Logger, expected.Logger = nil, nil + + // Function values are not comparable + assert.NotNil(t, actual.CombinedMetricsIDToKVs) + actual.CombinedMetricsIDToKVs, expected.CombinedMetricsIDToKVs = nil, nil + assert.NotNil(t, actual.Processor) + actual.Processor, expected.Processor = nil, nil + + assert.Equal(t, expected, actual) + } +} diff --git a/copy/apm-aggregation/aggregators/converter.go b/copy/apm-aggregation/aggregators/converter.go new file mode 100644 index 00000000000..e8aaeed40b0 --- /dev/null +++ b/copy/apm-aggregation/aggregators/converter.go @@ -0,0 +1,1134 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "errors" + "fmt" + "math" + "slices" + "sort" + "sync" + "time" + + "github.com/cespare/xxhash/v2" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" + "github.com/elastic/apm-aggregation/aggregators/internal/protohash" + "github.com/elastic/apm-aggregation/aggregators/nullable" + "github.com/elastic/apm-data/model/modelpb" +) + +const ( + spanMetricsetName = "service_destination" + txnMetricsetName = "transaction" + svcTxnMetricsetName = "service_transaction" + summaryMetricsetName = "service_summary" + + overflowBucketName = "_other" +) + +var ( + partitionedMetricsBuilderPool sync.Pool + eventMetricsBuilderPool sync.Pool +) + +// partitionedMetricsBuilder provides support for building partitioned +// sets of metrics from an event. +type partitionedMetricsBuilder struct { + partitions uint16 + serviceHash xxhash.Digest + builders []*eventMetricsBuilder // partitioned metrics + + // Event metrics are for exactly one service, so we create an array of a + // single element and use that for backing the slice in CombinedMetrics. + serviceAggregationKey aggregationpb.ServiceAggregationKey + serviceMetrics aggregationpb.ServiceMetrics + keyedServiceMetrics aggregationpb.KeyedServiceMetrics + keyedServiceMetricsArray [1]*aggregationpb.KeyedServiceMetrics + + // We reuse a single CombinedMetrics for all partitions, by iterating + // through each partition's metrics and setting them on the CombinedMetrics + // before invoking the callback in eventToCombinedMetrics. 
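+	// This reuse is also why the eventToCombinedMetrics callback must not
+	// retain the *CombinedMetrics it receives: the same object is mutated
+	// again for the next partition.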
+	combinedMetrics aggregationpb.CombinedMetrics
+}
+
+func getPartitionedMetricsBuilder(
+	serviceAggregationKey aggregationpb.ServiceAggregationKey,
+	partitions uint16,
+) *partitionedMetricsBuilder {
+	p, ok := partitionedMetricsBuilderPool.Get().(*partitionedMetricsBuilder)
+	if !ok {
+		p = &partitionedMetricsBuilder{}
+		p.keyedServiceMetrics.Key = &p.serviceAggregationKey
+		p.keyedServiceMetrics.Metrics = &p.serviceMetrics
+		p.keyedServiceMetricsArray[0] = &p.keyedServiceMetrics
+		p.combinedMetrics.ServiceMetrics = p.keyedServiceMetricsArray[:]
+	}
+	p.serviceAggregationKey = serviceAggregationKey
+	p.serviceHash = protohash.HashServiceAggregationKey(xxhash.Digest{}, &p.serviceAggregationKey)
+	p.partitions = partitions
+	return p
+}
+
+// release releases all partitioned builders back to their pools.
+// Objects will be reset as needed if/when the builder is reacquired.
+func (p *partitionedMetricsBuilder) release() {
+	for i, mb := range p.builders {
+		mb.release()
+		p.builders[i] = nil
+	}
+	p.builders = p.builders[:0]
+	partitionedMetricsBuilderPool.Put(p)
+}
+
+func (p *partitionedMetricsBuilder) processEvent(e *modelpb.APMEvent) {
+	switch e.Type() {
+	case modelpb.TransactionEventType:
+		repCount := e.GetTransaction().GetRepresentativeCount()
+		if repCount <= 0 {
+			p.addServiceSummaryMetrics()
+			return
+		}
+		duration := time.Duration(e.GetEvent().GetDuration())
+		p.addTransactionMetrics(e, repCount, duration)
+		p.addServiceTransactionMetrics(e, repCount, duration)
+		for _, dss := range e.GetTransaction().GetDroppedSpansStats() {
+			p.addDroppedSpanStatsMetrics(dss, repCount)
+		}
+	case modelpb.SpanEventType:
+		target := e.GetService().GetTarget()
+		repCount := e.GetSpan().GetRepresentativeCount()
+		destSvc := e.GetSpan().GetDestinationService().GetResource()
+		if repCount <= 0 || (target == nil && destSvc == "") {
+			p.addServiceSummaryMetrics()
+			return
+		}
+		p.addSpanMetrics(e, repCount)
+	default:
+		// All other event types should add empty service metrics,
+		// for adding to service summary metrics.
+		p.addServiceSummaryMetrics()
+	}
+}
+
+func (p *partitionedMetricsBuilder) addTransactionMetrics(e *modelpb.APMEvent, count float64, duration time.Duration) {
+	var key aggregationpb.TransactionAggregationKey
+	setTransactionKey(e, &key)
+	hash := protohash.HashTransactionAggregationKey(p.serviceHash, &key)
+
+	mb := p.get(hash)
+	mb.transactionAggregationKey = key
+
+	hdr := hdrhistogram.New()
+	hdr.RecordDuration(duration, count)
+	setHistogramProto(hdr, &mb.transactionHistogram)
+	mb.transactionMetrics.Histogram = &mb.transactionHistogram
+	mb.keyedTransactionMetricsSlice = mb.keyedTransactionMetricsArray[:]
+}
+
+func (p *partitionedMetricsBuilder) addServiceTransactionMetrics(e *modelpb.APMEvent, count float64, duration time.Duration) {
+	var key aggregationpb.ServiceTransactionAggregationKey
+	setServiceTransactionKey(e, &key)
+	hash := protohash.HashServiceTransactionAggregationKey(p.serviceHash, &key)
+
+	mb := p.get(hash)
+	mb.serviceTransactionAggregationKey = key
+
+	if mb.transactionMetrics.Histogram == nil {
+		// mb.transactionMetrics.Histogram will be set if the event's
+		// transaction metric ended up in the same partition.
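+		// Otherwise, record the duration here so that the shared
+		// transactionHistogram also backs the service transaction metric.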
+		hdr := hdrhistogram.New()
+		hdr.RecordDuration(duration, count)
+		setHistogramProto(hdr, &mb.transactionHistogram)
+	}
+	mb.serviceTransactionMetrics.Histogram = &mb.transactionHistogram
+	switch e.GetEvent().GetOutcome() {
+	case "failure":
+		mb.serviceTransactionMetrics.SuccessCount = 0
+		mb.serviceTransactionMetrics.FailureCount = count
+	case "success":
+		mb.serviceTransactionMetrics.SuccessCount = count
+		mb.serviceTransactionMetrics.FailureCount = 0
+	default:
+		mb.serviceTransactionMetrics.SuccessCount = 0
+		mb.serviceTransactionMetrics.FailureCount = 0
+	}
+	mb.keyedServiceTransactionMetricsSlice = mb.keyedServiceTransactionMetricsArray[:]
+}
+
+func (p *partitionedMetricsBuilder) addDroppedSpanStatsMetrics(dss *modelpb.DroppedSpanStats, repCount float64) {
+	var key aggregationpb.SpanAggregationKey
+	setDroppedSpanStatsKey(dss, &key)
+	hash := protohash.HashSpanAggregationKey(p.serviceHash, &key)
+
+	mb := p.get(hash)
+	i := len(mb.keyedSpanMetricsSlice)
+	if i == len(mb.keyedSpanMetricsArray) {
+		// No more capacity. The spec says that when 128 dropped span
+		// stats entries are reached, then any remaining entries will
+		// be silently discarded.
+		return
+	}
+
+	mb.spanAggregationKey[i] = key
+	setDroppedSpanStatsMetrics(dss, repCount, &mb.spanMetrics[i])
+	mb.keyedSpanMetrics[i].Key = &mb.spanAggregationKey[i]
+	mb.keyedSpanMetrics[i].Metrics = &mb.spanMetrics[i]
+	mb.keyedSpanMetricsSlice = append(mb.keyedSpanMetricsSlice, &mb.keyedSpanMetrics[i])
+}
+
+func (p *partitionedMetricsBuilder) addSpanMetrics(e *modelpb.APMEvent, repCount float64) {
+	var key aggregationpb.SpanAggregationKey
+	setSpanKey(e, &key)
+	hash := protohash.HashSpanAggregationKey(p.serviceHash, &key)
+
+	mb := p.get(hash)
+	i := len(mb.keyedSpanMetricsSlice)
+	mb.spanAggregationKey[i] = key
+	setSpanMetrics(e, repCount, &mb.spanMetrics[i])
+	mb.keyedSpanMetrics[i].Key = &mb.spanAggregationKey[i]
+	mb.keyedSpanMetrics[i].Metrics = &mb.spanMetrics[i]
+	mb.keyedSpanMetricsSlice = append(mb.keyedSpanMetricsSlice, &mb.keyedSpanMetrics[i])
+}
+
+func (p *partitionedMetricsBuilder) addServiceSummaryMetrics() {
+	// There are no actual metric values; we just want to
+	// create documents for the dimensions, so we can build a
+	// list of services.
+	_ = p.get(p.serviceHash)
+}
+
+func (p *partitionedMetricsBuilder) get(h xxhash.Digest) *eventMetricsBuilder {
+	partition := uint16(h.Sum64() % uint64(p.partitions))
+	for _, mb := range p.builders {
+		if mb.partition == partition {
+			return mb
+		}
+	}
+	mb := getEventMetricsBuilder(partition)
+	p.builders = append(p.builders, mb)
+	return mb
+}
+
+// eventMetricsBuilder holds memory for the contents of per-partition
+// ServiceMetrics. Each instance of the struct is capable of holding
+// as many metrics as may be produced for a single event.
+//
+// For each metric type, the builder holds:
+// - an array with enough capacity to hold the maximum possible
+//   number of that type
+// - a slice of the array, for tracking the actual number of
+//   metrics of that type that will be produced; this is what
+//   will be used for setting CombinedMetrics fields
+// - for each array element, space for an aggregation key
+// - for each array element, space for the metric values
+type eventMetricsBuilder struct {
+	partition uint16
+
+	// Preallocate space for a single-valued histogram. This histogram may
+	// be used for either or both transaction and service transaction metrics.
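+	// A single event records exactly one duration, so one bucket and one
+	// count are sufficient backing storage.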
+	transactionHDRHistogramRepresentation *hdrhistogram.HistogramRepresentation
+	transactionHistogramCounts            [1]int64
+	transactionHistogramBuckets           [1]int32
+	transactionHistogram                  aggregationpb.HDRHistogram
+
+	// There can be at most 1 transaction metric per event.
+	transactionAggregationKey    aggregationpb.TransactionAggregationKey
+	transactionMetrics           aggregationpb.TransactionMetrics
+	keyedTransactionMetrics      aggregationpb.KeyedTransactionMetrics
+	keyedTransactionMetricsArray [1]*aggregationpb.KeyedTransactionMetrics
+	keyedTransactionMetricsSlice []*aggregationpb.KeyedTransactionMetrics
+
+	// There can be at most 1 service transaction metric per event.
+	serviceTransactionAggregationKey    aggregationpb.ServiceTransactionAggregationKey
+	serviceTransactionMetrics           aggregationpb.ServiceTransactionMetrics
+	keyedServiceTransactionMetrics      aggregationpb.KeyedServiceTransactionMetrics
+	keyedServiceTransactionMetricsArray [1]*aggregationpb.KeyedServiceTransactionMetrics
+	keyedServiceTransactionMetricsSlice []*aggregationpb.KeyedServiceTransactionMetrics
+
+	// There can be at most 128 span metrics per event:
+	// - exactly 1 for a span event
+	// - at most 128 (dropped span stats) for a transaction event (1)
+	//
+	// (1) https://github.com/elastic/apm/blob/main/specs/agents/handling-huge-traces/tracing-spans-dropped-stats.md#limits
+	spanAggregationKey    [128]aggregationpb.SpanAggregationKey
+	spanMetrics           [128]aggregationpb.SpanMetrics
+	keyedSpanMetrics      [128]aggregationpb.KeyedSpanMetrics
+	keyedSpanMetricsArray [128]*aggregationpb.KeyedSpanMetrics
+	keyedSpanMetricsSlice []*aggregationpb.KeyedSpanMetrics
+}
+
+func getEventMetricsBuilder(partition uint16) *eventMetricsBuilder {
+	mb, ok := eventMetricsBuilderPool.Get().(*eventMetricsBuilder)
+	if ok {
+		mb.partition = partition
+		// Explicitly reset instead of invoking `Reset` to avoid extra cost due to
+		// additional protobuf specific resetting logic implemented by `Reset`.
+		mb.serviceTransactionMetrics = aggregationpb.ServiceTransactionMetrics{}
+		mb.transactionMetrics = aggregationpb.TransactionMetrics{}
+		for i := range mb.spanMetrics {
+			mb.spanMetrics[i] = aggregationpb.SpanMetrics{}
+		}
+		mb.transactionHDRHistogramRepresentation.CountsRep.Reset()
+		mb.keyedServiceTransactionMetricsSlice = mb.keyedServiceTransactionMetricsSlice[:0]
+		mb.keyedTransactionMetricsSlice = mb.keyedTransactionMetricsSlice[:0]
+		mb.keyedSpanMetricsSlice = mb.keyedSpanMetricsSlice[:0]
+		return mb
+	}
+	mb = &eventMetricsBuilder{partition: partition}
+	mb.transactionHDRHistogramRepresentation = hdrhistogram.New()
+	mb.transactionHistogram.Counts = mb.transactionHistogramCounts[:0]
+	mb.transactionHistogram.Buckets = mb.transactionHistogramBuckets[:0]
+	mb.transactionMetrics.Histogram = nil
+	mb.keyedTransactionMetrics.Key = &mb.transactionAggregationKey
+	mb.keyedTransactionMetrics.Metrics = &mb.transactionMetrics
+	mb.keyedTransactionMetricsArray[0] = &mb.keyedTransactionMetrics
+	mb.keyedTransactionMetricsSlice = mb.keyedTransactionMetricsArray[:0]
+	mb.keyedServiceTransactionMetrics.Key = &mb.serviceTransactionAggregationKey
+	mb.keyedServiceTransactionMetrics.Metrics = &mb.serviceTransactionMetrics
+	mb.keyedServiceTransactionMetricsArray[0] = &mb.keyedServiceTransactionMetrics
+	mb.keyedServiceTransactionMetricsSlice = mb.keyedServiceTransactionMetricsArray[:0]
+	mb.keyedSpanMetricsSlice = mb.keyedSpanMetricsArray[:0]
+	return mb
+}
+
+// release releases the builder back to the pool.
+// Objects will be reset as needed if/when the builder is reacquired.
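+//
+// A typical pairing, as used by partitionedMetricsBuilder:
+//
+//	mb := getEventMetricsBuilder(partition)
+//	// ... populate mb ...
+//	mb.release()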
+func (mb *eventMetricsBuilder) release() {
+	eventMetricsBuilderPool.Put(mb)
+}
+
+// eventToCombinedMetrics converts APMEvent to one or more CombinedMetrics and
+// calls the provided callback for each pair of CombinedMetricsKey and
+// CombinedMetrics. The callback MUST NOT hold a reference to the passed
+// CombinedMetrics. If required, the callback can call CloneVT to clone the
+// CombinedMetrics. If an event results in multiple metrics, they may be spread
+// across different partitions.
+//
+// eventToCombinedMetrics will never produce overflow metrics, as it applies to a
+// single APMEvent.
+func eventToCombinedMetrics(
+	e *modelpb.APMEvent,
+	unpartitionedKey CombinedMetricsKey,
+	partitions uint16,
+	callback func(CombinedMetricsKey, *aggregationpb.CombinedMetrics) error,
+) error {
+	globalLabels, err := marshalEventGlobalLabels(e)
+	if err != nil {
+		return fmt.Errorf("failed to marshal global labels: %w", err)
+	}
+
+	pmb := getPartitionedMetricsBuilder(
+		aggregationpb.ServiceAggregationKey{
+			Timestamp: modelpb.FromTime(
+				modelpb.ToTime(e.GetTimestamp()).Truncate(unpartitionedKey.Interval),
+			),
+			ServiceName:         e.GetService().GetName(),
+			ServiceEnvironment:  e.GetService().GetEnvironment(),
+			ServiceLanguageName: e.GetService().GetLanguage().GetName(),
+			AgentName:           e.GetAgent().GetName(),
+			GlobalLabelsStr:     globalLabels,
+		},
+		partitions,
+	)
+	defer pmb.release()
+
+	pmb.processEvent(e)
+	if len(pmb.builders) == 0 {
+		// This is an unexpected state, as any APMEvent must result in at
+		// least the service summary metric. If such a state happens then
+		// it would indicate a bug in `processEvent`.
+		return fmt.Errorf("service summary metric must be produced for any event")
+	}
+
+	// Approximate events total by uniformly distributing the events total
+	// amongst the partitioned key values.
+	pmb.combinedMetrics.EventsTotal = 1 / float64(len(pmb.builders))
+	pmb.combinedMetrics.YoungestEventTimestamp = e.GetEvent().GetReceived()
+
+	var errs []error
+	for _, mb := range pmb.builders {
+		key := unpartitionedKey
+		key.PartitionID = mb.partition
+		pmb.serviceMetrics.TransactionMetrics = mb.keyedTransactionMetricsSlice
+		pmb.serviceMetrics.ServiceTransactionMetrics = mb.keyedServiceTransactionMetricsSlice
+		pmb.serviceMetrics.SpanMetrics = mb.keyedSpanMetricsSlice
+		if err := callback(key, &pmb.combinedMetrics); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("failed while executing callback: %w", errors.Join(errs...))
+	}
+	return nil
+}
+
+// CombinedMetricsToBatch converts CombinedMetrics to a batch of APMEvents.
+// Events in the batch are populated using vtproto's sync pool and should be
+// released back to the pool using `APMEvent#ReturnToVTPool`.
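+//
+// A hedged usage sketch (arguments illustrative):
+//
+//	batch, err := CombinedMetricsToBatch(cm, time.Now(), time.Minute)
+//	if err == nil && batch != nil {
+//		for _, event := range *batch {
+//			_ = event // index or forward each metric document
+//		}
+//	}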
+func CombinedMetricsToBatch( + cm *aggregationpb.CombinedMetrics, + processingTime time.Time, + aggInterval time.Duration, +) (*modelpb.Batch, error) { + if cm == nil || len(cm.ServiceMetrics) == 0 { + return nil, nil + } + + var batchSize int + + // service_summary overflow metric + if len(cm.OverflowServicesEstimator) > 0 { + batchSize++ + if len(cm.OverflowServices.OverflowTransactionsEstimator) > 0 { + batchSize++ + } + if len(cm.OverflowServices.OverflowServiceTransactionsEstimator) > 0 { + batchSize++ + } + if len(cm.OverflowServices.OverflowSpansEstimator) > 0 { + batchSize++ + } + } + + for _, ksm := range cm.ServiceMetrics { + sm := ksm.Metrics + batchSize += len(sm.TransactionMetrics) + batchSize += len(sm.ServiceTransactionMetrics) + batchSize += len(sm.SpanMetrics) + batchSize++ // Each service will create a service summary metric + if sm.OverflowGroups == nil { + continue + } + if len(sm.OverflowGroups.OverflowTransactionsEstimator) > 0 { + batchSize++ + } + if len(sm.OverflowGroups.OverflowServiceTransactionsEstimator) > 0 { + batchSize++ + } + if len(sm.OverflowGroups.OverflowSpansEstimator) > 0 { + batchSize++ + } + } + + b := make(modelpb.Batch, 0, batchSize) + aggIntervalStr := formatDuration(aggInterval) + now := time.Now() + for _, ksm := range cm.ServiceMetrics { + sk, sm := ksm.Key, ksm.Metrics + + var gl globalLabels + if err := gl.UnmarshalBinary(sk.GlobalLabelsStr); err != nil { + return nil, fmt.Errorf("failed to unmarshal global labels: %w", err) + } + getBaseEventWithLabels := func() *modelpb.APMEvent { + event := getBaseEvent(sk, now) + event.Labels = gl.Labels + event.NumericLabels = gl.NumericLabels + return event + } + + // transaction metrics + for _, ktm := range sm.TransactionMetrics { + event := getBaseEventWithLabels() + txnMetricsToAPMEvent(ktm.Key, ktm.Metrics, event, aggIntervalStr) + b = append(b, event) + } + // service transaction metrics + for _, kstm := range sm.ServiceTransactionMetrics { + event := getBaseEventWithLabels() + svcTxnMetricsToAPMEvent(kstm.Key, kstm.Metrics, event, aggIntervalStr) + b = append(b, event) + } + // service destination metrics + for _, kspm := range sm.SpanMetrics { + event := getBaseEventWithLabels() + spanMetricsToAPMEvent(kspm.Key, kspm.Metrics, event, aggIntervalStr) + b = append(b, event) + } + + // service summary metrics + event := getBaseEventWithLabels() + serviceMetricsToAPMEvent(event, aggIntervalStr) + b = append(b, event) + + if sm.OverflowGroups == nil { + continue + } + if len(sm.OverflowGroups.OverflowTransactionsEstimator) > 0 { + estimator := hllSketch(sm.OverflowGroups.OverflowTransactionsEstimator) + event := getBaseEvent(sk, now) + overflowTxnMetricsToAPMEvent( + processingTime, + sm.OverflowGroups.OverflowTransactions, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + } + if len(sm.OverflowGroups.OverflowServiceTransactionsEstimator) > 0 { + estimator := hllSketch( + sm.OverflowGroups.OverflowServiceTransactionsEstimator, + ) + event := getBaseEvent(sk, now) + overflowSvcTxnMetricsToAPMEvent( + processingTime, + sm.OverflowGroups.OverflowServiceTransactions, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + } + if len(sm.OverflowGroups.OverflowSpansEstimator) > 0 { + estimator := hllSketch(sm.OverflowGroups.OverflowSpansEstimator) + event := getBaseEvent(sk, now) + overflowSpanMetricsToAPMEvent( + processingTime, + sm.OverflowGroups.OverflowSpans, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + } + } + 
if len(cm.OverflowServicesEstimator) > 0 { + estimator := hllSketch(cm.OverflowServicesEstimator) + event := getOverflowBaseEvent(cm.YoungestEventTimestamp) + overflowServiceMetricsToAPMEvent( + processingTime, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + if len(cm.OverflowServices.OverflowTransactionsEstimator) > 0 { + estimator := hllSketch(cm.OverflowServices.OverflowTransactionsEstimator) + event := getOverflowBaseEvent(cm.YoungestEventTimestamp) + overflowTxnMetricsToAPMEvent( + processingTime, + cm.OverflowServices.OverflowTransactions, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + + } + if len(cm.OverflowServices.OverflowServiceTransactionsEstimator) > 0 { + estimator := hllSketch( + cm.OverflowServices.OverflowServiceTransactionsEstimator, + ) + event := getOverflowBaseEvent(cm.YoungestEventTimestamp) + overflowSvcTxnMetricsToAPMEvent( + processingTime, + cm.OverflowServices.OverflowServiceTransactions, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + } + if len(cm.OverflowServices.OverflowSpansEstimator) > 0 { + estimator := hllSketch(cm.OverflowServices.OverflowSpansEstimator) + event := getOverflowBaseEvent(cm.YoungestEventTimestamp) + overflowSpanMetricsToAPMEvent( + processingTime, + cm.OverflowServices.OverflowSpans, + estimator.Estimate(), + event, + aggIntervalStr, + ) + b = append(b, event) + } + } + return &b, nil +} + +func setSpanMetrics(e *modelpb.APMEvent, repCount float64, out *aggregationpb.SpanMetrics) { + var count uint32 = 1 + duration := time.Duration(e.GetEvent().GetDuration()) + if composite := e.GetSpan().GetComposite(); composite != nil { + count = composite.GetCount() + duration = time.Duration(composite.GetSum() * float64(time.Millisecond)) + } + out.Count = float64(count) * repCount + out.Sum = float64(duration) * repCount +} + +func setDroppedSpanStatsMetrics(dss *modelpb.DroppedSpanStats, repCount float64, out *aggregationpb.SpanMetrics) { + out.Count = float64(dss.GetDuration().GetCount()) * repCount + out.Sum = float64(dss.GetDuration().GetSum()) * repCount +} + +func getBaseEvent( + key *aggregationpb.ServiceAggregationKey, + received time.Time, +) *modelpb.APMEvent { + event := &modelpb.APMEvent{} + event.Timestamp = key.Timestamp + event.Metricset = &modelpb.Metricset{} + event.Service = &modelpb.Service{} + event.Service.Name = key.ServiceName + event.Service.Environment = key.ServiceEnvironment + + if key.ServiceLanguageName != "" { + event.Service.Language = &modelpb.Language{} + event.Service.Language.Name = key.ServiceLanguageName + } + + if key.AgentName != "" { + event.Agent = &modelpb.Agent{} + event.Agent.Name = key.AgentName + } + + event.Event = &modelpb.Event{} + event.Event.Received = modelpb.FromTime(received) + + return event +} + +func getOverflowBaseEvent(youngestEventTS uint64) *modelpb.APMEvent { + e := &modelpb.APMEvent{} + e.Metricset = &modelpb.Metricset{} + e.Service = &modelpb.Service{} + e.Service.Name = overflowBucketName + + e.Event = &modelpb.Event{} + e.Event.Received = youngestEventTS + return e +} + +func serviceMetricsToAPMEvent( + baseEvent *modelpb.APMEvent, + intervalStr string, +) { + // Most service keys will already be present in the base event + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Name = summaryMetricsetName + baseEvent.Metricset.Interval = intervalStr +} + +func txnMetricsToAPMEvent( + key *aggregationpb.TransactionAggregationKey, + metrics 
*aggregationpb.TransactionMetrics, + baseEvent *modelpb.APMEvent, + intervalStr string, +) { + histogram := hdrhistogram.New() + histogramFromProto(histogram, metrics.Histogram) + totalCount, counts, values := histogram.Buckets() + eventSuccessCount := &modelpb.SummaryMetric{} + switch key.EventOutcome { + case "success": + eventSuccessCount.Count = totalCount + eventSuccessCount.Sum = float64(totalCount) + case "failure": + eventSuccessCount.Count = totalCount + case "unknown": + // Keep both Count and Sum as 0. + } + transactionDurationSummary := &modelpb.SummaryMetric{} + transactionDurationSummary.Count = totalCount + for i, v := range values { + transactionDurationSummary.Sum += v * float64(counts[i]) + } + + if baseEvent.Transaction == nil { + baseEvent.Transaction = &modelpb.Transaction{} + } + baseEvent.Transaction.Name = key.TransactionName + baseEvent.Transaction.Type = key.TransactionType + baseEvent.Transaction.Result = key.TransactionResult + baseEvent.Transaction.Root = key.TraceRoot + baseEvent.Transaction.DurationSummary = transactionDurationSummary + baseEvent.Transaction.DurationHistogram = &modelpb.Histogram{} + baseEvent.Transaction.DurationHistogram.Counts = counts + baseEvent.Transaction.DurationHistogram.Values = values + + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Name = txnMetricsetName + baseEvent.Metricset.DocCount = totalCount + baseEvent.Metricset.Interval = intervalStr + + if baseEvent.Event == nil { + baseEvent.Event = &modelpb.Event{} + } + baseEvent.Event.Outcome = key.EventOutcome + baseEvent.Event.SuccessCount = eventSuccessCount + + if key.ContainerId != "" { + if baseEvent.Container == nil { + baseEvent.Container = &modelpb.Container{} + } + baseEvent.Container.Id = key.ContainerId + } + + if key.KubernetesPodName != "" { + if baseEvent.Kubernetes == nil { + baseEvent.Kubernetes = &modelpb.Kubernetes{} + } + baseEvent.Kubernetes.PodName = key.KubernetesPodName + } + + if key.ServiceVersion != "" { + if baseEvent.Service == nil { + baseEvent.Service = &modelpb.Service{} + } + baseEvent.Service.Version = key.ServiceVersion + } + + if key.ServiceNodeName != "" { + if baseEvent.Service == nil { + baseEvent.Service = &modelpb.Service{} + } + if baseEvent.Service.Node == nil { + baseEvent.Service.Node = &modelpb.ServiceNode{} + } + baseEvent.Service.Node.Name = key.ServiceNodeName + } + + if key.ServiceRuntimeName != "" || + key.ServiceRuntimeVersion != "" { + + if baseEvent.Service == nil { + baseEvent.Service = &modelpb.Service{} + } + if baseEvent.Service.Runtime == nil { + baseEvent.Service.Runtime = &modelpb.Runtime{} + } + baseEvent.Service.Runtime.Name = key.ServiceRuntimeName + baseEvent.Service.Runtime.Version = key.ServiceRuntimeVersion + } + + if key.ServiceLanguageVersion != "" { + if baseEvent.Service == nil { + baseEvent.Service = &modelpb.Service{} + } + if baseEvent.Service.Language == nil { + baseEvent.Service.Language = &modelpb.Language{} + } + baseEvent.Service.Language.Version = key.ServiceLanguageVersion + } + + if key.HostHostname != "" || + key.HostName != "" { + + if baseEvent.Host == nil { + baseEvent.Host = &modelpb.Host{} + } + baseEvent.Host.Hostname = key.HostHostname + baseEvent.Host.Name = key.HostName + } + + if key.HostOsPlatform != "" { + if baseEvent.Host == nil { + baseEvent.Host = &modelpb.Host{} + } + if baseEvent.Host.Os == nil { + baseEvent.Host.Os = &modelpb.OS{} + } + baseEvent.Host.Os.Platform = key.HostOsPlatform + } + + faasColdstart := 
nullable.Bool(key.FaasColdstart) + if faasColdstart != nullable.Nil || + key.FaasId != "" || + key.FaasName != "" || + key.FaasVersion != "" || + key.FaasTriggerType != "" { + + if baseEvent.Faas == nil { + baseEvent.Faas = &modelpb.Faas{} + } + baseEvent.Faas.ColdStart = faasColdstart.ToBoolPtr() + baseEvent.Faas.Id = key.FaasId + baseEvent.Faas.Name = key.FaasName + baseEvent.Faas.Version = key.FaasVersion + baseEvent.Faas.TriggerType = key.FaasTriggerType + } + + if key.CloudProvider != "" || + key.CloudRegion != "" || + key.CloudAvailabilityZone != "" || + key.CloudServiceName != "" || + key.CloudAccountId != "" || + key.CloudAccountName != "" || + key.CloudMachineType != "" || + key.CloudProjectId != "" || + key.CloudProjectName != "" { + + if baseEvent.Cloud == nil { + baseEvent.Cloud = &modelpb.Cloud{} + } + baseEvent.Cloud.Provider = key.CloudProvider + baseEvent.Cloud.Region = key.CloudRegion + baseEvent.Cloud.AvailabilityZone = key.CloudAvailabilityZone + baseEvent.Cloud.ServiceName = key.CloudServiceName + baseEvent.Cloud.AccountId = key.CloudAccountId + baseEvent.Cloud.AccountName = key.CloudAccountName + baseEvent.Cloud.MachineType = key.CloudMachineType + baseEvent.Cloud.ProjectId = key.CloudProjectId + baseEvent.Cloud.ProjectName = key.CloudProjectName + } +} + +func svcTxnMetricsToAPMEvent( + key *aggregationpb.ServiceTransactionAggregationKey, + metrics *aggregationpb.ServiceTransactionMetrics, + baseEvent *modelpb.APMEvent, + intervalStr string, +) { + histogram := hdrhistogram.New() + histogramFromProto(histogram, metrics.Histogram) + totalCount, counts, values := histogram.Buckets() + transactionDurationSummary := modelpb.SummaryMetric{ + Count: totalCount, + } + for i, v := range values { + transactionDurationSummary.Sum += v * float64(counts[i]) + } + + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Name = svcTxnMetricsetName + baseEvent.Metricset.DocCount = totalCount + baseEvent.Metricset.Interval = intervalStr + + if baseEvent.Transaction == nil { + baseEvent.Transaction = &modelpb.Transaction{} + } + baseEvent.Transaction.Type = key.TransactionType + baseEvent.Transaction.DurationSummary = &transactionDurationSummary + if baseEvent.Transaction.DurationHistogram == nil { + baseEvent.Transaction.DurationHistogram = &modelpb.Histogram{} + } + baseEvent.Transaction.DurationHistogram.Counts = counts + baseEvent.Transaction.DurationHistogram.Values = values + + if baseEvent.Event == nil { + baseEvent.Event = &modelpb.Event{} + } + if baseEvent.Event.SuccessCount == nil { + baseEvent.Event.SuccessCount = &modelpb.SummaryMetric{} + } + baseEvent.Event.SuccessCount.Count = + uint64(math.Round(metrics.SuccessCount + metrics.FailureCount)) + baseEvent.Event.SuccessCount.Sum = math.Round(metrics.SuccessCount) +} + +func spanMetricsToAPMEvent( + key *aggregationpb.SpanAggregationKey, + metrics *aggregationpb.SpanMetrics, + baseEvent *modelpb.APMEvent, + intervalStr string, +) { + var target *modelpb.ServiceTarget + if key.TargetName != "" || key.TargetType != "" { + target = &modelpb.ServiceTarget{} + target.Type = key.TargetType + target.Name = key.TargetName + } + if baseEvent.Service == nil { + baseEvent.Service = &modelpb.Service{} + } + baseEvent.Service.Target = target + + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Name = spanMetricsetName + baseEvent.Metricset.DocCount = uint64(math.Round(metrics.Count)) + baseEvent.Metricset.Interval = intervalStr + + if 
baseEvent.Span == nil {
+		baseEvent.Span = &modelpb.Span{}
+	}
+	baseEvent.Span.Name = key.SpanName
+
+	if baseEvent.Span.DestinationService == nil {
+		baseEvent.Span.DestinationService = &modelpb.DestinationService{}
+	}
+	baseEvent.Span.DestinationService.Resource = key.Resource
+	if baseEvent.Span.DestinationService.ResponseTime == nil {
+		baseEvent.Span.DestinationService.ResponseTime =
+			&modelpb.AggregatedDuration{}
+	}
+	baseEvent.Span.DestinationService.ResponseTime.Count =
+		uint64(math.Round(metrics.Count))
+	baseEvent.Span.DestinationService.ResponseTime.Sum =
+		uint64(math.Round(metrics.Sum))
+
+	if key.Outcome != "" {
+		if baseEvent.Event == nil {
+			baseEvent.Event = &modelpb.Event{}
+		}
+		baseEvent.Event.Outcome = key.Outcome
+	}
+}
+
+func overflowServiceMetricsToAPMEvent(
+	processingTime time.Time,
+	overflowCount uint64,
+	baseEvent *modelpb.APMEvent,
+	intervalStr string,
+) {
+	// Overflow metrics use the processing time as their timestamp rather than
+	// the event time. This makes sure that they can be associated with the
+	// appropriate time when the event volume caused them to overflow.
+	baseEvent.Timestamp = modelpb.FromTime(processingTime)
+	serviceMetricsToAPMEvent(baseEvent, intervalStr)
+
+	sample := &modelpb.MetricsetSample{}
+	sample.Name = "service_summary.aggregation.overflow_count"
+	sample.Value = float64(overflowCount)
+	if baseEvent.Metricset == nil {
+		baseEvent.Metricset = &modelpb.Metricset{}
+	}
+	baseEvent.Metricset.Samples = append(baseEvent.Metricset.Samples, sample)
+}
+
+// overflowTxnMetricsToAPMEvent maps the fields of overflow
+// transaction to the passed APMEvent. This only updates transaction
+// metrics related fields and expects that service-related fields
+// are present in the passed APMEvent.
+//
+// For the doc count, unlike the span metrics, which use the estimated
+// overflow count, the transaction metrics use the value derived
+// from the histogram to avoid consistency issues between the
+// overflow estimate and the histogram.
+func overflowTxnMetricsToAPMEvent(
+	processingTime time.Time,
+	overflowTxn *aggregationpb.TransactionMetrics,
+	overflowCount uint64,
+	baseEvent *modelpb.APMEvent,
+	intervalStr string,
+) {
+	// Overflow metrics use the processing time as their timestamp rather than
+	// the event time. This makes sure that they can be associated with the
+	// appropriate time when the event volume caused them to overflow.
+	baseEvent.Timestamp = modelpb.FromTime(processingTime)
+	overflowKey := &aggregationpb.TransactionAggregationKey{
+		TransactionName: overflowBucketName,
+	}
+	txnMetricsToAPMEvent(overflowKey, overflowTxn, baseEvent, intervalStr)
+
+	sample := &modelpb.MetricsetSample{}
+	sample.Name = "transaction.aggregation.overflow_count"
+	sample.Value = float64(overflowCount)
+	if baseEvent.Metricset == nil {
+		baseEvent.Metricset = &modelpb.Metricset{}
+	}
+	baseEvent.Metricset.Samples = append(baseEvent.Metricset.Samples, sample)
+}
+
+func overflowSvcTxnMetricsToAPMEvent(
+	processingTime time.Time,
+	overflowSvcTxn *aggregationpb.ServiceTransactionMetrics,
+	overflowCount uint64,
+	baseEvent *modelpb.APMEvent,
+	intervalStr string,
+) {
+	// Overflow metrics use the processing time as their timestamp rather than
+	// the event time. This makes sure that they can be associated with the
+	// appropriate time when the event volume caused them to overflow.
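+	// Only the transaction type is set on the overflow key below; all
+	// other dimensions collapse into the single _other bucket.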
+ baseEvent.Timestamp = modelpb.FromTime(processingTime) + overflowKey := &aggregationpb.ServiceTransactionAggregationKey{ + TransactionType: overflowBucketName, + } + svcTxnMetricsToAPMEvent(overflowKey, overflowSvcTxn, baseEvent, intervalStr) + + sample := &modelpb.MetricsetSample{} + sample.Name = "service_transaction.aggregation.overflow_count" + sample.Value = float64(overflowCount) + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Samples = append(baseEvent.Metricset.Samples, sample) +} + +func overflowSpanMetricsToAPMEvent( + processingTime time.Time, + overflowSpan *aggregationpb.SpanMetrics, + overflowCount uint64, + baseEvent *modelpb.APMEvent, + intervalStr string, +) { + // Overflow metrics use the processing time as their timestamp rather than + // the event time. This makes sure that they can be associated with the + // appropriate time when the event volume caused them to overflow. + baseEvent.Timestamp = modelpb.FromTime(processingTime) + overflowKey := &aggregationpb.SpanAggregationKey{ + TargetName: overflowBucketName, + } + spanMetricsToAPMEvent(overflowKey, overflowSpan, baseEvent, intervalStr) + + sample := &modelpb.MetricsetSample{} + sample.Name = "service_destination.aggregation.overflow_count" + sample.Value = float64(overflowCount) + if baseEvent.Metricset == nil { + baseEvent.Metricset = &modelpb.Metricset{} + } + baseEvent.Metricset.Samples = append(baseEvent.Metricset.Samples, sample) + baseEvent.Metricset.DocCount = overflowCount +} + +func marshalEventGlobalLabels(e *modelpb.APMEvent) ([]byte, error) { + var labelsCnt, numericLabelsCnt int + for _, v := range e.Labels { + if !v.Global { + continue + } + labelsCnt++ + } + for _, v := range e.NumericLabels { + if !v.Global { + continue + } + numericLabelsCnt++ + } + + if labelsCnt == 0 && numericLabelsCnt == 0 { + return nil, nil + } + + pb := &aggregationpb.GlobalLabels{} + pb.Labels = slices.Grow(pb.Labels, labelsCnt)[:labelsCnt] + pb.NumericLabels = slices.Grow(pb.NumericLabels, numericLabelsCnt)[:numericLabelsCnt] + + var i int + // Keys must be sorted to ensure wire formats are deterministically generated and strings are directly comparable + // i.e. 
Protobuf formats are equal if and only if the structs are equal + for k, v := range e.Labels { + if !v.Global { + continue + } + if pb.Labels[i] == nil { + pb.Labels[i] = &aggregationpb.Label{} + } + pb.Labels[i].Key = k + pb.Labels[i].Value = v.Value + pb.Labels[i].Values = slices.Grow(pb.Labels[i].Values, len(v.Values))[:len(v.Values)] + copy(pb.Labels[i].Values, v.Values) + i++ + } + sort.Slice(pb.Labels, func(i, j int) bool { + return pb.Labels[i].Key < pb.Labels[j].Key + }) + + i = 0 + for k, v := range e.NumericLabels { + if !v.Global { + continue + } + if pb.NumericLabels[i] == nil { + pb.NumericLabels[i] = &aggregationpb.NumericLabel{} + } + pb.NumericLabels[i].Key = k + pb.NumericLabels[i].Value = v.Value + pb.NumericLabels[i].Values = slices.Grow(pb.NumericLabels[i].Values, len(v.Values))[:len(v.Values)] + copy(pb.NumericLabels[i].Values, v.Values) + i++ + } + sort.Slice(pb.NumericLabels, func(i, j int) bool { + return pb.NumericLabels[i].Key < pb.NumericLabels[j].Key + }) + + return pb.MarshalVT() +} + +func setTransactionKey(e *modelpb.APMEvent, key *aggregationpb.TransactionAggregationKey) { + var faasColdstart nullable.Bool + faas := e.GetFaas() + if faas != nil { + faasColdstart.ParseBoolPtr(faas.ColdStart) + } + + key.TraceRoot = e.GetParentId() == "" + + key.ContainerId = e.GetContainer().GetId() + key.KubernetesPodName = e.GetKubernetes().GetPodName() + + key.ServiceVersion = e.GetService().GetVersion() + key.ServiceNodeName = e.GetService().GetNode().GetName() + + key.ServiceRuntimeName = e.GetService().GetRuntime().GetName() + key.ServiceRuntimeVersion = e.GetService().GetRuntime().GetVersion() + key.ServiceLanguageVersion = e.GetService().GetLanguage().GetVersion() + + key.HostHostname = e.GetHost().GetHostname() + key.HostName = e.GetHost().GetName() + key.HostOsPlatform = e.GetHost().GetOs().GetPlatform() + + key.EventOutcome = e.GetEvent().GetOutcome() + + key.TransactionName = e.GetTransaction().GetName() + key.TransactionType = e.GetTransaction().GetType() + key.TransactionResult = e.GetTransaction().GetResult() + + key.FaasColdstart = uint32(faasColdstart) + key.FaasId = faas.GetId() + key.FaasName = faas.GetName() + key.FaasVersion = faas.GetVersion() + key.FaasTriggerType = faas.GetTriggerType() + + key.CloudProvider = e.GetCloud().GetProvider() + key.CloudRegion = e.GetCloud().GetRegion() + key.CloudAvailabilityZone = e.GetCloud().GetAvailabilityZone() + key.CloudServiceName = e.GetCloud().GetServiceName() + key.CloudAccountId = e.GetCloud().GetAccountId() + key.CloudAccountName = e.GetCloud().GetAccountName() + key.CloudMachineType = e.GetCloud().GetMachineType() + key.CloudProjectId = e.GetCloud().GetProjectId() + key.CloudProjectName = e.GetCloud().GetProjectName() +} + +func setServiceTransactionKey(e *modelpb.APMEvent, key *aggregationpb.ServiceTransactionAggregationKey) { + key.TransactionType = e.GetTransaction().GetType() +} + +func setSpanKey(e *modelpb.APMEvent, key *aggregationpb.SpanAggregationKey) { + var resource, targetType, targetName string + target := e.GetService().GetTarget() + if target != nil { + targetType = target.GetType() + targetName = target.GetName() + } + destSvc := e.GetSpan().GetDestinationService() + if destSvc != nil { + resource = destSvc.GetResource() + } + + key.SpanName = e.GetSpan().GetName() + key.Outcome = e.GetEvent().GetOutcome() + key.TargetType = targetType + key.TargetName = targetName + key.Resource = resource +} + +func setDroppedSpanStatsKey(dss *modelpb.DroppedSpanStats, key *aggregationpb.SpanAggregationKey) { 
+ // Dropped span statistics do not contain span name because it + // would be too expensive to track dropped span stats per span name. + key.Outcome = dss.GetOutcome() + key.TargetType = dss.GetServiceTargetType() + key.TargetName = dss.GetServiceTargetName() + key.Resource = dss.GetDestinationServiceResource() +} + +func formatDuration(d time.Duration) string { + if duration := d.Minutes(); duration >= 1 { + return fmt.Sprintf("%.0fm", duration) + } + return fmt.Sprintf("%.0fs", d.Seconds()) +} diff --git a/copy/apm-aggregation/aggregators/converter_test.go b/copy/apm-aggregation/aggregators/converter_test.go new file mode 100644 index 00000000000..4bc1cf476a7 --- /dev/null +++ b/copy/apm-aggregation/aggregators/converter_test.go @@ -0,0 +1,885 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "fmt" + "net/netip" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/testing/protocmp" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" + "github.com/elastic/apm-aggregation/aggregators/nullable" + "github.com/elastic/apm-data/model/modelpb" +) + +func TestEventToCombinedMetrics(t *testing.T) { + ts := time.Now().UTC() + receivedTS := ts.Add(time.Second) + baseEvent := &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(ts), + ParentId: "nonroot", + Service: &modelpb.Service{Name: "test"}, + Event: &modelpb.Event{ + Duration: uint64(time.Second), + Outcome: "success", + Received: modelpb.FromTime(receivedTS), + }, + } + for _, tc := range []struct { + name string + input func() []*modelpb.APMEvent + partitions uint16 + expected func() []*aggregationpb.CombinedMetrics + }{ + { + name: "nil-input", + input: func() []*modelpb.APMEvent { + return nil + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return nil + }, + }, + { + name: "with-zero-rep-count-txn", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Transaction = &modelpb.Transaction{ + Name: "testtxn", + Type: "testtyp", + RepresentativeCount: 0, + } + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + GetProto(), + } + }, + }, + { + name: "with-good-txn", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Transaction = &modelpb.Transaction{ + Name: "testtxn", + Type: "testtyp", + RepresentativeCount: 1, + } + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + AddTransaction(transactionAggregationKey{ + TransactionName: "testtxn", + TransactionType: "testtyp", + EventOutcome: "success", + }). 
+ AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "testtyp", + }).GetProto(), + } + }, + }, + { + name: "with-zero-rep-count-span", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Span = &modelpb.Span{ + Name: "testspan", + Type: "testtyp", + RepresentativeCount: 0, + } + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + GetProto(), + } + }, + }, + { + name: "with-no-exit-span", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Span = &modelpb.Span{ + Name: "testspan", + Type: "testtyp", + RepresentativeCount: 1, + } + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + GetProto(), + } + }, + }, + { + name: "with-good-span-svc-target", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Span = &modelpb.Span{ + Name: "testspan", + Type: "testtyp", + RepresentativeCount: 1, + } + event.Service.Target = &modelpb.ServiceTarget{ + Name: "psql", + Type: "db", + } + // Current test structs are hardcoded to use 1ns for spans + event.Event.Duration = uint64(time.Nanosecond) + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + AddSpan(spanAggregationKey{ + SpanName: "testspan", + TargetName: "psql", + TargetType: "db", + Outcome: "success", + }).GetProto(), + } + }, + }, + { + name: "with-good-span-dest-svc", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Span = &modelpb.Span{ + Name: "testspan", + Type: "testtyp", + RepresentativeCount: 1, + DestinationService: &modelpb.DestinationService{ + Resource: "db", + }, + } + // Current test structs are hardcoded to use 1ns for spans + event.Event.Duration = uint64(time.Nanosecond) + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + AddSpan(spanAggregationKey{ + SpanName: "testspan", + Resource: "db", + Outcome: "success", + }).GetProto(), + } + }, + }, + { + name: "with-metricset", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Metricset = &modelpb.Metricset{ + Name: "testmetricset", + Interval: "1m", + } + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). 
+ AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + GetProto(), + } + }, + }, + { + name: "with-log", + input: func() []*modelpb.APMEvent { + event := baseEvent.CloneVT() + event.Log = &modelpb.Log{} + return []*modelpb.APMEvent{event} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + GetProto(), + } + }, + }, + { + name: "with-success-txn-followed-by-unknown-txn", + input: func() []*modelpb.APMEvent { + success := baseEvent.CloneVT() + success.Transaction = &modelpb.Transaction{ + Name: "testtxn1", + Type: "testtyp1", + RepresentativeCount: 1, + } + unknown := baseEvent.CloneVT() + unknown.Event.Outcome = "unknown" + unknown.Transaction = &modelpb.Transaction{ + Name: "testtxn2", + Type: "testtyp2", + RepresentativeCount: 1, + } + return []*modelpb.APMEvent{success, unknown} + }, + partitions: 1, + expected: func() []*aggregationpb.CombinedMetrics { + return []*aggregationpb.CombinedMetrics{ + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + AddTransaction(transactionAggregationKey{ + TransactionName: "testtxn1", + TransactionType: "testtyp1", + EventOutcome: "success", + }). + AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "testtyp1", + }).GetProto(), + NewTestCombinedMetrics( + WithEventsTotal(1), + WithYoungestEventTimestamp(receivedTS)). + AddServiceMetrics(serviceAggregationKey{ + Timestamp: ts.Truncate(time.Minute), + ServiceName: "test"}). + AddTransaction(transactionAggregationKey{ + TransactionName: "testtxn2", + TransactionType: "testtyp2", + EventOutcome: "unknown", + }). 
+ AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: "testtyp2", + }, WithEventOutcome("unknown")).GetProto(), + } + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + cmk := CombinedMetricsKey{ + Interval: time.Minute, + ProcessingTime: time.Now().Truncate(time.Minute), + ID: EncodeToCombinedMetricsKeyID(t, "ab01"), + } + var actual []*aggregationpb.CombinedMetrics + collector := func( + _ CombinedMetricsKey, + m *aggregationpb.CombinedMetrics, + ) error { + actual = append(actual, m.CloneVT()) + return nil + } + for _, e := range tc.input() { + err := eventToCombinedMetrics(e, cmk, tc.partitions, collector) + require.NoError(t, err) + } + assert.Empty(t, cmp.Diff( + tc.expected(), actual, + cmp.Comparer(func(a, b hdrhistogram.HybridCountsRep) bool { + return a.Equal(&b) + }), + protocmp.Transform(), + protocmp.IgnoreEmptyMessages(), + )) + }) + } +} + +func TestCombinedMetricsToBatch(t *testing.T) { + ts := time.Now() + youngestEventTS := ts.Add(-time.Second) + aggIvl := time.Minute + processingTime := ts.Truncate(aggIvl) + svcName := "test" + coldstart := nullable.True + var ( + svc = serviceAggregationKey{Timestamp: ts, ServiceName: svcName} + faas = &modelpb.Faas{Id: "f1", ColdStart: coldstart.ToBoolPtr(), Version: "v2", TriggerType: "http"} + span = spanAggregationKey{SpanName: "spn", Resource: "postgresql"} + overflowSpan = spanAggregationKey{TargetName: "_other"} + spanCount = 1 + svcTxn = serviceTransactionAggregationKey{TransactionType: "typ"} + overflowSvcTxn = serviceTransactionAggregationKey{TransactionType: "_other"} + txn = transactionAggregationKey{TransactionName: "txn", TransactionType: "typ"} + txnFaas = transactionAggregationKey{TransactionName: "txn", TransactionType: "typ", + FAASID: faas.Id, FAASColdstart: coldstart, FAASVersion: faas.Version, FAASTriggerType: faas.TriggerType} + overflowTxn = transactionAggregationKey{TransactionName: "_other"} + txnCount = 100 + ) + for _, tc := range []struct { + name string + aggregationInterval time.Duration + combinedMetrics func() *aggregationpb.CombinedMetrics + expectedEvents modelpb.Batch + }{ + { + name: "no_overflow_without_faas", + aggregationInterval: aggIvl, + combinedMetrics: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithYoungestEventTimestamp(youngestEventTS)). + AddServiceMetrics(svc). + AddSpan(span, WithSpanCount(spanCount)). + AddTransaction(txn, WithTransactionCount(txnCount)). + AddServiceTransaction(svcTxn, WithTransactionCount(txnCount)). + GetProto() + }, + expectedEvents: []*modelpb.APMEvent{ + createTestTransactionMetric(ts, aggIvl, svcName, txn, nil, txnCount, 0), + createTestServiceTransactionMetric(ts, aggIvl, svcName, svcTxn, txnCount, 0), + createTestSpanMetric(ts, aggIvl, svcName, span, spanCount, 0), + createTestServiceSummaryMetric(ts, aggIvl, svcName, 0), + }, + }, + { + name: "no_overflow", + aggregationInterval: aggIvl, + combinedMetrics: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithYoungestEventTimestamp(youngestEventTS)). + AddServiceMetrics(svc). + AddSpan(span, WithSpanCount(spanCount)). + AddTransaction(txnFaas, WithTransactionCount(txnCount)). + AddServiceTransaction(svcTxn, WithTransactionCount(txnCount)). 
+ GetProto()
+ },
+ expectedEvents: []*modelpb.APMEvent{
+ createTestTransactionMetric(ts, aggIvl, svcName, txn, faas, txnCount, 0),
+ createTestServiceTransactionMetric(ts, aggIvl, svcName, svcTxn, txnCount, 0),
+ createTestSpanMetric(ts, aggIvl, svcName, span, spanCount, 0),
+ createTestServiceSummaryMetric(ts, aggIvl, svcName, 0),
+ },
+ },
+ {
+ name: "overflow",
+ aggregationInterval: aggIvl,
+ combinedMetrics: func() *aggregationpb.CombinedMetrics {
+ tcm := NewTestCombinedMetrics(WithYoungestEventTimestamp(youngestEventTS))
+ tcm.
+ AddServiceMetrics(svc).
+ AddSpan(span, WithSpanCount(spanCount)).
+ AddTransaction(txnFaas, WithTransactionCount(txnCount)).
+ AddServiceTransaction(svcTxn, WithTransactionCount(txnCount)).
+ AddTransactionOverflow(txn, WithTransactionCount(txnCount)).
+ AddServiceTransactionOverflow(svcTxn, WithTransactionCount(txnCount)).
+ AddSpanOverflow(span, WithSpanCount(spanCount))
+ // Add global service overflow
+ tcm.
+ AddServiceMetricsOverflow(
+ serviceAggregationKey{Timestamp: ts, ServiceName: "svc_overflow"})
+ return tcm.GetProto()
+ },
+ expectedEvents: []*modelpb.APMEvent{
+ createTestTransactionMetric(ts, aggIvl, svcName, txnFaas, faas, txnCount, 0),
+ createTestServiceTransactionMetric(ts, aggIvl, svcName, svcTxn, txnCount, 0),
+ createTestSpanMetric(ts, aggIvl, svcName, span, spanCount, 0),
+ createTestServiceSummaryMetric(ts, aggIvl, svcName, 0),
+ // Events due to overflow
+ createTestTransactionMetric(processingTime, aggIvl, svcName, overflowTxn, nil, txnCount, 1),
+ createTestServiceTransactionMetric(processingTime, aggIvl, svcName, overflowSvcTxn, txnCount, 1),
+ createTestSpanMetric(processingTime, aggIvl, svcName, overflowSpan, spanCount, 1),
+ createTestServiceSummaryMetric(processingTime, aggIvl, "_other", 1),
+ },
+ },
+ {
+ name: "service_overflow",
+ aggregationInterval: aggIvl,
+ combinedMetrics: func() *aggregationpb.CombinedMetrics {
+ tcm := NewTestCombinedMetrics(WithYoungestEventTimestamp(youngestEventTS))
+ tcm.AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"})
+ tcm.AddServiceMetricsOverflow(serviceAggregationKey{
+ Timestamp: ts, ServiceName: "svc1",
+ GlobalLabelsStr: getTestGlobalLabelsStr(t, "1"),
+ })
+ tcm.AddServiceMetricsOverflow(serviceAggregationKey{
+ Timestamp: ts, ServiceName: "svc2",
+ GlobalLabelsStr: getTestGlobalLabelsStr(t, "2"),
+ })
+ return tcm.GetProto()
+ },
+ expectedEvents: []*modelpb.APMEvent{
+ createTestServiceSummaryMetric(ts, aggIvl, "svc1", 0),
+ createTestServiceSummaryMetric(processingTime, aggIvl, "_other", 2),
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ b, err := CombinedMetricsToBatch(
+ tc.combinedMetrics(),
+ processingTime,
+ tc.aggregationInterval,
+ )
+ assert.NoError(t, err)
+ assert.Empty(t, cmp.Diff(
+ tc.expectedEvents, *b,
+ cmpopts.IgnoreTypes(netip.Addr{}),
+ cmpopts.SortSlices(func(e1, e2 *modelpb.APMEvent) bool {
+ m1Name := e1.GetMetricset().GetName()
+ m2Name := e2.GetMetricset().GetName()
+ if m1Name != m2Name {
+ return m1Name < m2Name
+ }
+
+ a1Name := e1.GetAgent().GetName()
+ a2Name := e2.GetAgent().GetName()
+ if a1Name != a2Name {
+ return a1Name < a2Name
+ }
+
+ return e1.GetService().GetEnvironment() < e2.GetService().GetEnvironment()
+ }),
+ protocmp.Transform(),
+ protocmp.FilterField(
+ &modelpb.Event{},
+ "received",
+ cmp.Comparer(func(a, b uint64) bool {
+ if a > b {
+ a, b = b, a
+ }
+ // The received timestamp is set as time.Now in both actual and
+ // expected events.
We assert that both these values are within + // a threshold. + return b-a < uint64(10*time.Second) + }), + ), + )) + }) + } +} + +func BenchmarkCombinedMetricsToBatch(b *testing.B) { + ai := time.Hour + ts := time.Now() + pt := ts.Truncate(ai) + cardinality := 10 + tcm := NewTestCombinedMetrics(). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "bench"}) + for i := 0; i < cardinality; i++ { + txnName := fmt.Sprintf("txn%d", i) + txnType := fmt.Sprintf("typ%d", i) + spanName := fmt.Sprintf("spn%d", i) + tcm. + AddTransaction(transactionAggregationKey{ + TransactionName: txnName, + TransactionType: txnType, + }, WithTransactionCount(200)). + AddServiceTransaction(serviceTransactionAggregationKey{ + TransactionType: txnType, + }, WithTransactionCount(200)). + AddSpan(spanAggregationKey{ + SpanName: spanName, + }) + } + cm := tcm.GetProto() + b.ResetTimer() + for i := 0; i < b.N; i++ { + batch, err := CombinedMetricsToBatch(cm, pt, ai) + if err != nil { + b.Fatal(err) + } + for _, e := range *batch { + e.ReturnToVTPool() + } + } +} + +func BenchmarkEventToCombinedMetrics(b *testing.B) { + event := &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(time.Now()), + ParentId: "nonroot", + Service: &modelpb.Service{ + Name: "test", + }, + Event: &modelpb.Event{ + Duration: uint64(time.Second), + Outcome: "success", + }, + Transaction: &modelpb.Transaction{ + RepresentativeCount: 1, + Name: "testtxn", + Type: "testtyp", + }, + } + cmk := CombinedMetricsKey{ + Interval: time.Minute, + ProcessingTime: time.Now().Truncate(time.Minute), + ID: EncodeToCombinedMetricsKeyID(b, "ab01"), + } + noop := func(_ CombinedMetricsKey, _ *aggregationpb.CombinedMetrics) error { + return nil + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := eventToCombinedMetrics(event, cmk, 1 /*partitions*/, noop) + if err != nil { + b.Fatal(err) + } + } +} + +func createTestServiceSummaryMetric( + ts time.Time, + ivl time.Duration, + svcName string, + overflowCount int, +) *modelpb.APMEvent { + var metricsetSamples []*modelpb.MetricsetSample + if overflowCount > 0 { + metricsetSamples = []*modelpb.MetricsetSample{ + { + Name: "service_summary.aggregation.overflow_count", + Value: float64(overflowCount), + }, + } + } + return &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(ts), + Event: &modelpb.Event{Received: modelpb.FromTime(time.Now())}, + Metricset: &modelpb.Metricset{ + Name: "service_summary", + Samples: metricsetSamples, + Interval: formatDuration(ivl), + }, + Service: &modelpb.Service{Name: svcName}, + } +} + +func createTestTransactionMetric( + ts time.Time, + ivl time.Duration, + svcName string, + txn transactionAggregationKey, + faas *modelpb.Faas, + count, overflowCount int, +) *modelpb.APMEvent { + histRep := hdrhistogram.New() + histRep.RecordDuration(time.Second, float64(count)) + total, counts, values := histRep.Buckets() + var eventSuccessSummary modelpb.SummaryMetric + switch txn.EventOutcome { + case "success": + eventSuccessSummary.Count = total + eventSuccessSummary.Sum = float64(total) + case "failure": + eventSuccessSummary.Count = total + case "unknown": + // Keep both Count and Sum as 0. 
+ } + transactionDurationSummary := &modelpb.SummaryMetric{ + Count: total, + // only 1 expected element + Sum: values[0] * float64(counts[0]), + } + var metricsetSamples []*modelpb.MetricsetSample + if overflowCount > 0 { + metricsetSamples = []*modelpb.MetricsetSample{ + { + Name: "transaction.aggregation.overflow_count", + Value: float64(overflowCount), + }, + } + } + return &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(ts), + Event: &modelpb.Event{ + SuccessCount: &eventSuccessSummary, + Received: modelpb.FromTime(time.Now()), + }, + Metricset: &modelpb.Metricset{ + Name: "transaction", + Interval: formatDuration(ivl), + Samples: metricsetSamples, + DocCount: total, + }, + Service: &modelpb.Service{Name: svcName}, + Transaction: &modelpb.Transaction{ + Name: txn.TransactionName, + Type: txn.TransactionType, + DurationHistogram: &modelpb.Histogram{ + Counts: counts, + Values: values, + }, + DurationSummary: transactionDurationSummary, + }, + Faas: faas, + } +} + +func createTestServiceTransactionMetric( + ts time.Time, + ivl time.Duration, + svcName string, + svcTxn serviceTransactionAggregationKey, + count, overflowCount int, +) *modelpb.APMEvent { + histRep := hdrhistogram.New() + histRep.RecordDuration(time.Second, float64(count)) + total, counts, values := histRep.Buckets() + transactionDurationSummary := &modelpb.SummaryMetric{ + Count: total, + // only 1 expected element + Sum: values[0] * float64(counts[0]), + } + var metricsetSamples []*modelpb.MetricsetSample + if overflowCount > 0 { + metricsetSamples = []*modelpb.MetricsetSample{ + { + Name: "service_transaction.aggregation.overflow_count", + Value: float64(overflowCount), + }, + } + } + return &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(ts), + Metricset: &modelpb.Metricset{ + Name: "service_transaction", + Interval: formatDuration(ivl), + Samples: metricsetSamples, + DocCount: total, + }, + Service: &modelpb.Service{Name: svcName}, + Transaction: &modelpb.Transaction{ + Type: svcTxn.TransactionType, + DurationHistogram: &modelpb.Histogram{ + Counts: counts, + Values: values, + }, + DurationSummary: transactionDurationSummary, + }, + Event: &modelpb.Event{ + Received: modelpb.FromTime(time.Now()), + SuccessCount: &modelpb.SummaryMetric{ + // test code generates all success events + Count: uint64(count), + Sum: float64(count), + }, + }, + } +} + +func createTestSpanMetric( + ts time.Time, + ivl time.Duration, + svcName string, + span spanAggregationKey, + count, overflowCount int, +) *modelpb.APMEvent { + var metricsetSamples []*modelpb.MetricsetSample + if overflowCount > 0 { + metricsetSamples = []*modelpb.MetricsetSample{ + { + Name: "service_destination.aggregation.overflow_count", + Value: float64(overflowCount), + }, + } + } + var target *modelpb.ServiceTarget + if span.TargetName != "" { + target = &modelpb.ServiceTarget{ + Name: span.TargetName, + } + } + return &modelpb.APMEvent{ + Timestamp: modelpb.FromTime(ts), + Event: &modelpb.Event{Received: modelpb.FromTime(time.Now())}, + Metricset: &modelpb.Metricset{ + Name: "service_destination", + Interval: formatDuration(ivl), + Samples: metricsetSamples, + DocCount: uint64(count), + }, + Service: &modelpb.Service{ + Name: svcName, + Target: target, + }, + Span: &modelpb.Span{ + Name: span.SpanName, + DestinationService: &modelpb.DestinationService{ + Resource: span.Resource, + ResponseTime: &modelpb.AggregatedDuration{ + // test code generates 1 count for 1 ns + Count: uint64(count), + Sum: uint64(time.Duration(count)), + }, + }, + }, + } +} + +func 
getTestGlobalLabelsStr(t *testing.T, s string) string {
+ t.Helper()
+ var gl globalLabels
+ gl.Labels = make(modelpb.Labels)
+ gl.Labels["test"] = &modelpb.LabelValue{Value: s}
+ gls, err := gl.MarshalString()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return gls
+}
+
+func globalLabelsEvent() *modelpb.APMEvent {
+ return &modelpb.APMEvent{
+ Labels: modelpb.Labels{
+ "tag1": &modelpb.LabelValue{
+ Value: "1",
+ Values: nil,
+ Global: false,
+ },
+ "tag2": &modelpb.LabelValue{
+ Value: "2",
+ Values: nil,
+ Global: true,
+ },
+ "tag3": &modelpb.LabelValue{
+ Value: "",
+ Values: []string{"a", "b"},
+ Global: false,
+ },
+ "tag4": &modelpb.LabelValue{
+ Value: "",
+ Values: []string{"c", "d"},
+ Global: true,
+ },
+ },
+ NumericLabels: modelpb.NumericLabels{
+ "tag1": &modelpb.NumericLabelValue{
+ Value: 1.1,
+ Values: nil,
+ Global: false,
+ },
+ "tag2": &modelpb.NumericLabelValue{
+ Value: 2.2,
+ Values: nil,
+ Global: true,
+ },
+ "tag3": &modelpb.NumericLabelValue{
+ Value: 0,
+ Values: []float64{3.3, 4.4},
+ Global: false,
+ },
+ "tag4": &modelpb.NumericLabelValue{
+ Value: 0,
+ Values: []float64{5.5, 6.6},
+ Global: true,
+ },
+ },
+ }
+}
+
+func TestMarshalEventGlobalLabels(t *testing.T) {
+ e := globalLabelsEvent()
+ b, err := marshalEventGlobalLabels(e)
+ require.NoError(t, err)
+ gl := globalLabels{}
+ err = gl.UnmarshalBinary(b)
+ require.NoError(t, err)
+ assert.Equal(t, modelpb.Labels{
+ "tag2": &modelpb.LabelValue{
+ Value: "2",
+ Values: nil,
+ Global: true,
+ },
+ "tag4": &modelpb.LabelValue{
+ Value: "",
+ Values: []string{"c", "d"},
+ Global: true,
+ },
+ }, gl.Labels)
+ assert.Equal(t, modelpb.NumericLabels{
+ "tag2": &modelpb.NumericLabelValue{
+ Value: 2.2,
+ Values: nil,
+ Global: true,
+ },
+ "tag4": &modelpb.NumericLabelValue{
+ Value: 0,
+ Values: []float64{5.5, 6.6},
+ Global: true,
+ },
+ }, gl.NumericLabels)
+}
+
+func TestMarshalEventGlobalLabelsRace(t *testing.T) {
+ const N = 1000
+ wg := sync.WaitGroup{}
+ for i := 0; i < N; i++ {
+ wg.Add(1)
+ go func() {
+ e := globalLabelsEvent()
+ b, err := marshalEventGlobalLabels(e)
+ require.NoError(t, err)
+ gl := globalLabels{}
+ err = gl.UnmarshalBinary(b)
+ require.NoError(t, err)
+ b, err = gl.MarshalBinary()
+ require.NoError(t, err)
+ err = gl.UnmarshalBinary(b)
+ require.NoError(t, err)
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/copy/apm-aggregation/aggregators/internal/constraint/constraint.go b/copy/apm-aggregation/aggregators/internal/constraint/constraint.go
new file mode 100644
index 00000000000..f5712f51e71
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/internal/constraint/constraint.go
@@ -0,0 +1,34 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+// Package constraint holds the definition of a generic constraint structure.
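+//
+// As an illustrative usage sketch (not part of the upstream file), a
+// Constraint pairs a running counter with a fixed limit:
+//
+//	c := constraint.New(0, 2) // counter starts at 0, limit is 2
+//	c.Maxed()                 // false: counter (0) < limit (2)
+//	c.Add(2)                  // counter is now 2
+//	c.Maxed()                 // true: counter (2) >= limit (2)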
+package constraint + +type Constraint struct { + counter int + limit int +} + +func New(initialCount, limit int) *Constraint { + return &Constraint{ + counter: initialCount, + limit: limit, + } +} + +func (c *Constraint) Maxed() bool { + return c.counter >= c.limit +} + +func (c *Constraint) Add(delta int) { + c.counter += delta +} + +func (c *Constraint) Value() int { + return c.counter +} + +func (c *Constraint) Limit() int { + return c.limit +} diff --git a/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram.go b/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram.go new file mode 100644 index 00000000000..e1786148457 --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram.go @@ -0,0 +1,393 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// The MIT License (MIT) +// +// Copyright (c) 2014 Coda Hale +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package hdrhistogram provides an optimized histogram for sparse samples. +// This is a stop gap measure until we have [packed histogram implementation](https://www.javadoc.io/static/org.hdrhistogram/HdrHistogram/2.1.12/org/HdrHistogram/PackedHistogram.html). +package hdrhistogram + +import ( + "fmt" + "math" + "math/bits" + "slices" + "time" +) + +const ( + lowestTrackableValue = 1 + highestTrackableValue = 3.6e+9 // 1 hour in microseconds + significantFigures = 2 + + // We scale transaction counts in the histogram, which only permits storing + // integer counts, to allow for fractional transactions due to sampling. + // + // e.g. if the sampling rate is 0.4, then each sampled transaction has a + // representative count of 2.5 (1/0.4). If we receive two such transactions + // we will record a count of 5000 (2 * 2.5 * histogramCountScale). When we + // publish metrics, we will scale down to 5 (5000 / histogramCountScale). 
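+ //
+ // Put differently (an illustrative note, not in the upstream file): the
+ // scale of 1000 preserves roughly three decimal places of a fractional
+ // representative count (see RecordDuration below); e.g. a count of 0.001
+ // is recorded as round(0.001 * 1000) = 1.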
+ histogramCountScale = 1000
+)
+
+var (
+ unitMagnitude = getUnitMagnitude()
+ bucketCount = getBucketCount()
+ subBucketCount = getSubBucketCount()
+ subBucketHalfCountMagnitude = getSubBucketHalfCountMagnitude()
+ subBucketHalfCount = getSubBucketHalfCount()
+ subBucketMask = getSubBucketMask()
+ countsLen = getCountsLen()
+)
+
+// HistogramRepresentation is an optimization over HDR histogram mainly useful
+// for recording values clustered in some range rather than distributed over
+// the full range of the HDR histogram. It is based on the [hdrhistogram-go](https://github.com/HdrHistogram/hdrhistogram-go) package.
+// The type is not safe for concurrent usage; use external lock
+// protection if required.
+type HistogramRepresentation struct {
+ LowestTrackableValue int64
+ HighestTrackableValue int64
+ SignificantFigures int64
+ CountsRep HybridCountsRep
+}
+
+// New returns a new instance of HistogramRepresentation.
+func New() *HistogramRepresentation {
+ return &HistogramRepresentation{
+ LowestTrackableValue: lowestTrackableValue,
+ HighestTrackableValue: highestTrackableValue,
+ SignificantFigures: significantFigures,
+ }
+}
+
+// RecordDuration records a duration in the histogram representation. It
+// supports recording float64 counts up to 3 decimal places. This is
+// achieved by scaling the count.
+func (h *HistogramRepresentation) RecordDuration(d time.Duration, n float64) error {
+ count := int64(math.Round(n * histogramCountScale))
+ v := d.Microseconds()
+
+ return h.RecordValues(v, count)
+}
+
+// RecordValues records values in the histogram representation.
+func (h *HistogramRepresentation) RecordValues(v, n int64) error {
+ idx := h.countsIndexFor(v)
+ if idx < 0 || int32(countsLen) <= idx {
+ return fmt.Errorf("value %d is too large to be recorded", v)
+ }
+ h.CountsRep.Add(idx, n)
+ return nil
+}
+
+// Merge merges the provided histogram representation.
+// TODO: Add support for migration from a histogram representation
+// with different parameters.
+func (h *HistogramRepresentation) Merge(from *HistogramRepresentation) {
+ if from == nil {
+ return
+ }
+ from.CountsRep.ForEach(func(bucket int32, value int64) {
+ h.CountsRep.Add(bucket, value)
+ })
+}
+
+// Buckets converts the histogram into ordered slices of counts
+// and values per bar along with the total count.
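+//
+// A minimal sketch of how the returned slices relate (illustrative only;
+// the values depend on what was recorded):
+//
+//	h := New()
+//	_ = h.RecordDuration(time.Second, 1) // stored as 1e6 microseconds
+//	total, counts, values := h.Buckets()
+//	// total == 1, counts == []uint64{1}, and values holds the highest
+//	// value (in microseconds) equivalent to the recorded duration.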
+func (h *HistogramRepresentation) Buckets() (uint64, []uint64, []float64) { + counts := make([]uint64, 0, h.CountsRep.Len()) + values := make([]float64, 0, h.CountsRep.Len()) + + var totalCount uint64 + var prevBucket int32 + iter := h.iterator() + iter.nextCountAtIdx() + h.CountsRep.ForEach(func(bucket int32, scaledCounts int64) { + if scaledCounts <= 0 { + return + } + if iter.advance(int(bucket - prevBucket)) { + count := uint64(math.Round(float64(scaledCounts) / histogramCountScale)) + counts = append(counts, count) + values = append(values, float64(iter.highestEquivalentValue)) + totalCount += count + } + prevBucket = bucket + }) + return totalCount, counts, values +} + +func (h *HistogramRepresentation) countsIndexFor(v int64) int32 { + bucketIdx := h.getBucketIndex(v) + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + return h.countsIndex(bucketIdx, subBucketIdx) +} + +func (h *HistogramRepresentation) countsIndex(bucketIdx, subBucketIdx int32) int32 { + baseBucketIdx := (bucketIdx + 1) << uint(subBucketHalfCountMagnitude) + return baseBucketIdx + subBucketIdx - subBucketHalfCount +} + +func (h *HistogramRepresentation) getBucketIndex(v int64) int32 { + var pow2Ceiling = int64(64 - bits.LeadingZeros64(uint64(v|subBucketMask))) + return int32(pow2Ceiling - int64(unitMagnitude) - + int64(subBucketHalfCountMagnitude+1)) +} + +func (h *HistogramRepresentation) getSubBucketIdx(v int64, idx int32) int32 { + return int32(v >> uint(int64(idx)+int64(unitMagnitude))) +} + +func (h *HistogramRepresentation) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { + return int64(subBucketIdx) << uint(bucketIdx+unitMagnitude) +} + +func (h *HistogramRepresentation) highestEquivalentValue(v int64) int64 { + return h.nextNonEquivalentValue(v) - 1 +} + +func (h *HistogramRepresentation) nextNonEquivalentValue(v int64) int64 { + bucketIdx := h.getBucketIndex(v) + return h.lowestEquivalentValueGivenBucketIdx(v, bucketIdx) + h.sizeOfEquivalentValueRangeGivenBucketIdx(v, bucketIdx) +} + +func (h *HistogramRepresentation) lowestEquivalentValueGivenBucketIdx(v int64, bucketIdx int32) int64 { + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + return h.valueFromIndex(bucketIdx, subBucketIdx) +} + +func (h *HistogramRepresentation) sizeOfEquivalentValueRangeGivenBucketIdx(v int64, bucketIdx int32) int64 { + subBucketIdx := h.getSubBucketIdx(v, bucketIdx) + adjustedBucket := bucketIdx + if subBucketIdx >= subBucketCount { + adjustedBucket++ + } + return int64(1) << uint(unitMagnitude+adjustedBucket) +} + +func (h *HistogramRepresentation) iterator() *iterator { + return &iterator{ + h: h, + subBucketIdx: -1, + } +} + +type iterator struct { + h *HistogramRepresentation + bucketIdx, subBucketIdx int32 + valueFromIdx int64 + highestEquivalentValue int64 +} + +// advance advances the iterator by count +func (i *iterator) advance(count int) bool { + for c := 0; c < count; c++ { + if !i.nextCountAtIdx() { + return false + } + } + i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) + return true +} + +func (i *iterator) nextCountAtIdx() bool { + // increment bucket + i.subBucketIdx++ + if i.subBucketIdx >= subBucketCount { + i.subBucketIdx = subBucketHalfCount + i.bucketIdx++ + } + + if i.bucketIdx >= bucketCount { + return false + } + + i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) + return true +} + +func getSubBucketHalfCountMagnitude() int32 { + largetValueWithSingleUnitResolution := 2 * math.Pow10(significantFigures) + subBucketCountMagnitude := int32(math.Ceil(math.Log2( 
largetValueWithSingleUnitResolution,
+ )))
+ if subBucketCountMagnitude < 1 {
+ return 0
+ }
+ return subBucketCountMagnitude - 1
+}
+
+func getUnitMagnitude() int32 {
+ unitMag := int32(math.Floor(math.Log2(
+ lowestTrackableValue,
+ )))
+ if unitMag < 0 {
+ return 0
+ }
+ return unitMag
+}
+
+func getSubBucketCount() int32 {
+ return int32(math.Pow(2, float64(getSubBucketHalfCountMagnitude()+1)))
+}
+
+func getSubBucketHalfCount() int32 {
+ return getSubBucketCount() / 2
+}
+
+func getSubBucketMask() int64 {
+ return int64(getSubBucketCount()-1) << uint(getUnitMagnitude())
+}
+
+func getCountsLen() int64 {
+ return int64((getBucketCount() + 1) * (getSubBucketCount() / 2))
+}
+
+func getBucketCount() int32 {
+ smallestUntrackableValue := int64(getSubBucketCount()) << uint(getUnitMagnitude())
+ bucketsNeeded := int32(1)
+ for smallestUntrackableValue < highestTrackableValue {
+ if smallestUntrackableValue > (math.MaxInt64 / 2) {
+ // next shift will overflow, meaning that bucket could
+ // represent values up to ones greater than math.MaxInt64,
+ // so it's the last bucket
+ return bucketsNeeded + 1
+ }
+ smallestUntrackableValue <<= 1
+ bucketsNeeded++
+ }
+ return bucketsNeeded
+}
+
+// bar represents a bar of the histogram. Each bar has a bucket, representing
+// where the bar belongs to in the histogram range, and the count of values
+// in that bucket.
+type bar struct {
+ Bucket int32
+ Count int64
+}
+
+// HybridCountsRep is a hybrid counts representation for a sparse
+// histogram. It is optimized to record a single value inline as an
+// integer, promoting to a sorted slice when more values are recorded.
+type HybridCountsRep struct {
+ bucket int32
+ value int64
+ s []bar
+}
+
+// Add adds a new value to the bucket of the given index.
+func (c *HybridCountsRep) Add(bucket int32, value int64) {
+ if c.s == nil && c.bucket == 0 && c.value == 0 {
+ c.bucket = bucket
+ c.value = value
+ return
+ }
+ if c.s == nil {
+ // automatic promotion to slice
+ c.s = make([]bar, 0, 128) // TODO: Use pool
+ c.s = slices.Insert(c.s, 0, bar{Bucket: c.bucket, Count: c.value})
+ c.bucket, c.value = 0, 0
+ }
+ at, found := slices.BinarySearchFunc(c.s, bar{Bucket: bucket}, compareBar)
+ if found {
+ c.s[at].Count += value
+ return
+ }
+ c.s = slices.Insert(c.s, at, bar{Bucket: bucket, Count: value})
+}
+
+// ForEach iterates over each bucket and calls the given function.
+func (c *HybridCountsRep) ForEach(f func(int32, int64)) {
+ if c.s == nil && (c.bucket != 0 || c.value != 0) {
+ f(c.bucket, c.value)
+ return
+ }
+ for i := range c.s {
+ f(c.s[i].Bucket, c.s[i].Count)
+ }
+}
+
+// Len returns the number of buckets currently recording.
+func (c *HybridCountsRep) Len() int {
+ if c.s != nil {
+ return len(c.s)
+ }
+ if c.bucket != 0 || c.value != 0 {
+ return 1
+ }
+ return 0
+}
+
+// Get returns the count of values in a given bucket along with a bool
+// which is false if the bucket is not found.
+func (c *HybridCountsRep) Get(bucket int32) (int64, bool) {
+ if c.s == nil {
+ if c.bucket == bucket {
+ return c.value, true
+ }
+ return 0, false
+ }
+ at, found := slices.BinarySearchFunc(c.s, bar{Bucket: bucket}, compareBar)
+ if found {
+ return c.s[at].Count, true
+ }
+ return 0, false
+}
+
+// Reset resets the values recorded.
+func (c *HybridCountsRep) Reset() {
+ c.bucket = 0
+ c.value = 0
+ c.s = c.s[:0]
+}
+
+// Equal returns true if the same buckets and counts are recorded in both.
+func (c *HybridCountsRep) Equal(h *HybridCountsRep) bool { + if c.Len() != h.Len() { + return false + } + if c.Len() == 0 { + return true + } + equal := true + c.ForEach(func(bucket int32, value1 int64) { + value2, ok := h.Get(bucket) + if !ok || value1 != value2 { + equal = false + } + }) + return equal +} + +func compareBar(a, b bar) int { + if a.Bucket == b.Bucket { + return 0 + } + if a.Bucket > b.Bucket { + return 1 + } + return -1 +} diff --git a/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram_test.go b/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram_test.go new file mode 100644 index 00000000000..c028caadb37 --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/hdrhistogram/hdrhistogram_test.go @@ -0,0 +1,96 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package hdrhistogram + +import ( + "math" + "math/rand" + "testing" + + "github.com/HdrHistogram/hdrhistogram-go" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMerge(t *testing.T) { + hist1, hist2 := getTestHistogram(), getTestHistogram() + histRep1, histRep2 := New(), New() + + for i := 0; i < 1_000_000; i++ { + v1, v2 := rand.Int63n(3_600_000_000), rand.Int63n(3_600_000_000) + c1, c2 := rand.Int63n(1_000), rand.Int63n(1_000) + hist1.RecordValues(v1, c1) + histRep1.RecordValues(v1, c1) + hist2.RecordValues(v2, c2) + histRep2.RecordValues(v2, c2) + } + + require.Equal(t, int64(0), hist1.Merge(hist2)) + histRep1.Merge(histRep2) + assert.Empty(t, cmp.Diff(hist1.Export(), convertHistogramRepToSnapshot(histRep1))) +} + +func TestBuckets(t *testing.T) { + buckets := func(h *hdrhistogram.Histogram) (uint64, []uint64, []float64) { + distribution := h.Distribution() + counts := make([]uint64, 0, len(distribution)) + values := make([]float64, 0, len(distribution)) + + var totalCount uint64 + for _, b := range distribution { + if b.Count <= 0 { + continue + } + count := uint64(math.Round(float64(b.Count) / histogramCountScale)) + counts = append(counts, count) + values = append(values, float64(b.To)) + totalCount += count + } + return totalCount, counts, values + } + hist := getTestHistogram() + histRep := New() + + recordValuesForAll := func(v, n int64) { + hist.RecordValues(v, n) + histRep.RecordValues(v, n) + } + + // Explicitly test for recording values with 0 count + recordValuesForAll(rand.Int63n(3_600_000_000), 0) + for i := 0; i < 1_000_000; i++ { + v := rand.Int63n(3_600_000_000) + c := rand.Int63n(1_000) + recordValuesForAll(v, c) + } + actualTotalCount, actualCounts, actualValues := histRep.Buckets() + expectedTotalCount, expectedCounts, expectedValues := buckets(hist) + + assert.Equal(t, expectedTotalCount, actualTotalCount) + assert.Equal(t, expectedCounts, actualCounts) + assert.Equal(t, expectedValues, actualValues) +} + +func getTestHistogram() *hdrhistogram.Histogram { + return hdrhistogram.New( + lowestTrackableValue, + highestTrackableValue, + int(significantFigures), + ) +} + +func convertHistogramRepToSnapshot(h *HistogramRepresentation) *hdrhistogram.Snapshot { + counts := make([]int64, countsLen) + h.CountsRep.ForEach(func(bucket int32, value int64) { + counts[bucket] += value + }) + return &hdrhistogram.Snapshot{ + LowestTrackableValue: h.LowestTrackableValue, + HighestTrackableValue: 
h.HighestTrackableValue, + SignificantFigures: h.SignificantFigures, + Counts: counts, + } +} diff --git a/copy/apm-aggregation/aggregators/internal/protohash/doc.go b/copy/apm-aggregation/aggregators/internal/protohash/doc.go new file mode 100644 index 00000000000..32d4f97411b --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/protohash/doc.go @@ -0,0 +1,8 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:generate go run ./generate + +// Package protohash holds functions for hashing aggregationpb.*Key types. +package protohash diff --git a/copy/apm-aggregation/aggregators/internal/protohash/generate/main.go b/copy/apm-aggregation/aggregators/internal/protohash/generate/main.go new file mode 100644 index 00000000000..11019f348e7 --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/protohash/generate/main.go @@ -0,0 +1,119 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package main + +import ( + "fmt" + "go/types" + "log" + "os" + "strings" + + "golang.org/x/tools/go/packages" +) + +func main() { + const pkgpath = "github.com/elastic/apm-aggregation/aggregationpb" + cfg := &packages.Config{Mode: packages.NeedTypes | packages.NeedTypesInfo} + pkgs, err := packages.Load(cfg, pkgpath) + if err != nil { + log.Fatal(err) + } + + f, err := os.Create("generated.go") + if err != nil { + log.Fatal(err) + } + defer f.Close() + + fmt.Fprintln(f, ` +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by protohash/generate. DO NOT EDIT. 
+ +package protohash + +import ( + "encoding/binary" + + "github.com/cespare/xxhash/v2" + + "github.com/elastic/apm-aggregation/aggregationpb" +) + +func writeUint32(h *xxhash.Digest, v uint32) { + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], v) + h.Write(buf[:]) +} + +func writeUint64(h *xxhash.Digest, v uint64) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], v) + h.Write(buf[:]) +} +`[1:]) + + pkg := pkgs[0] + pkgscope := pkg.Types.Scope() + for _, name := range pkgscope.Names() { + if !strings.HasSuffix(name, "Key") { + continue + } + typeName, ok := pkgscope.Lookup(name).(*types.TypeName) + if !ok || !typeName.Exported() { + continue + } + named := typeName.Type().(*types.Named) + structType, ok := named.Underlying().(*types.Struct) + if !ok { + continue + } + + fmt.Fprintf(f, "func Hash%[1]s(h xxhash.Digest, k *aggregationpb.%[1]s) xxhash.Digest {\n", name) + for i := 0; i < structType.NumFields(); i++ { + field := structType.Field(i) + if !field.Exported() { + continue + } + var unhandled bool + switch fieldType := field.Type().(type) { + case *types.Basic: + switch kind := fieldType.Kind(); kind { + case types.Bool: + fmt.Fprintf(f, " if k.%s {\n h.WriteString(\"1\")\n }\n", field.Name()) + case types.String: + fmt.Fprintf(f, " h.WriteString(k.%s)\n", field.Name()) + case types.Uint32: + fmt.Fprintf(f, " writeUint32(&h, k.%s)\n", field.Name()) + case types.Uint64: + fmt.Fprintf(f, " writeUint64(&h, k.%s)\n", field.Name()) + default: + unhandled = true + } + case *types.Slice: + switch elemType := fieldType.Elem().(type) { + case *types.Basic: + if elemType.Kind() != types.Byte { + unhandled = true + break + } + fmt.Fprintf(f, " h.Write(k.%s)\n", field.Name()) + default: + unhandled = true + } + default: + unhandled = true + } + if unhandled { + panic(fmt.Errorf("unhandled field %s.%s (%v)", name, field.Name(), field.Type())) + } + } + fmt.Fprintln(f, " return h\n}") + fmt.Fprintln(f) + } +} diff --git a/copy/apm-aggregation/aggregators/internal/protohash/generated.go b/copy/apm-aggregation/aggregators/internal/protohash/generated.go new file mode 100644 index 00000000000..7142a67384e --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/protohash/generated.go @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by protohash/generate. DO NOT EDIT. 
+ +package protohash + +import ( + "encoding/binary" + + "github.com/cespare/xxhash/v2" + + "github.com/elastic/apm-aggregation/aggregationpb" +) + +func writeUint32(h *xxhash.Digest, v uint32) { + var buf [4]byte + binary.LittleEndian.PutUint32(buf[:], v) + h.Write(buf[:]) +} + +func writeUint64(h *xxhash.Digest, v uint64) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], v) + h.Write(buf[:]) +} + +func HashServiceAggregationKey(h xxhash.Digest, k *aggregationpb.ServiceAggregationKey) xxhash.Digest { + writeUint64(&h, k.Timestamp) + h.WriteString(k.ServiceName) + h.WriteString(k.ServiceEnvironment) + h.WriteString(k.ServiceLanguageName) + h.WriteString(k.AgentName) + h.Write(k.GlobalLabelsStr) + return h +} + +func HashServiceTransactionAggregationKey(h xxhash.Digest, k *aggregationpb.ServiceTransactionAggregationKey) xxhash.Digest { + h.WriteString(k.TransactionType) + return h +} + +func HashSpanAggregationKey(h xxhash.Digest, k *aggregationpb.SpanAggregationKey) xxhash.Digest { + h.WriteString(k.SpanName) + h.WriteString(k.Outcome) + h.WriteString(k.TargetType) + h.WriteString(k.TargetName) + h.WriteString(k.Resource) + return h +} + +func HashTransactionAggregationKey(h xxhash.Digest, k *aggregationpb.TransactionAggregationKey) xxhash.Digest { + if k.TraceRoot { + h.WriteString("1") + } + h.WriteString(k.ContainerId) + h.WriteString(k.KubernetesPodName) + h.WriteString(k.ServiceVersion) + h.WriteString(k.ServiceNodeName) + h.WriteString(k.ServiceRuntimeName) + h.WriteString(k.ServiceRuntimeVersion) + h.WriteString(k.ServiceLanguageVersion) + h.WriteString(k.HostHostname) + h.WriteString(k.HostName) + h.WriteString(k.HostOsPlatform) + h.WriteString(k.EventOutcome) + h.WriteString(k.TransactionName) + h.WriteString(k.TransactionType) + h.WriteString(k.TransactionResult) + writeUint32(&h, k.FaasColdstart) + h.WriteString(k.FaasId) + h.WriteString(k.FaasName) + h.WriteString(k.FaasVersion) + h.WriteString(k.FaasTriggerType) + h.WriteString(k.CloudProvider) + h.WriteString(k.CloudRegion) + h.WriteString(k.CloudAvailabilityZone) + h.WriteString(k.CloudServiceName) + h.WriteString(k.CloudAccountId) + h.WriteString(k.CloudAccountName) + h.WriteString(k.CloudMachineType) + h.WriteString(k.CloudProjectId) + h.WriteString(k.CloudProjectName) + return h +} diff --git a/copy/apm-aggregation/aggregators/internal/telemetry/config.go b/copy/apm-aggregation/aggregators/internal/telemetry/config.go new file mode 100644 index 00000000000..2ca41b7f186 --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/telemetry/config.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package telemetry + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" +) + +type config struct { + Meter metric.Meter +} + +// Option interface is used to configure optional config options. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +func newConfig(opts ...Option) *config { + c := &config{ + Meter: otel.GetMeterProvider().Meter("aggregators"), + } + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// WithMeter configures a meter to use for telemetry. If no meter is +// passed then the meter is created using the global provider. 
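+//
+// A usage sketch (illustrative only; it assumes db is an open *pebble.DB
+// and meterProvider is any metric.MeterProvider):
+//
+//	metrics, err := NewMetrics(
+//		func() *pebble.Metrics { return db.Metrics() },
+//		WithMeter(meterProvider.Meter("aggregators")),
+//	)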
+func WithMeter(meter metric.Meter) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.Meter = meter
+ })
+}
diff --git a/copy/apm-aggregation/aggregators/internal/telemetry/config_test.go b/copy/apm-aggregation/aggregators/internal/telemetry/config_test.go
new file mode 100644
index 00000000000..1dc3342ce66
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/internal/telemetry/config_test.go
@@ -0,0 +1,47 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package telemetry
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func TestConfig(t *testing.T) {
+ custom := metric.NewMeterProvider().Meter("test")
+ for _, tt := range []struct {
+ name string
+ options []Option
+ expected func() *config
+ }{
+ {
+ name: "empty_config",
+ options: nil,
+ expected: func() *config {
+ return &config{
+ Meter: otel.GetMeterProvider().Meter("aggregators"),
+ }
+ },
+ },
+ {
+ name: "config_with_custom_meter_provider",
+ options: []Option{WithMeter(custom)},
+ expected: func() *config {
+ return &config{
+ Meter: custom,
+ }
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ cfg := newConfig(tt.options...)
+
+ assert.Equal(t, tt.expected(), cfg)
+ })
+ }
+}
diff --git a/copy/apm-aggregation/aggregators/internal/telemetry/metrics.go b/copy/apm-aggregation/aggregators/internal/telemetry/metrics.go
new file mode 100644
index 00000000000..a769b09d25a
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/internal/telemetry/metrics.go
@@ -0,0 +1,301 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+// Package telemetry holds the logic for emitting telemetry when performing aggregation.
+package telemetry
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/cockroachdb/pebble"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+)
+
+const (
+ bytesUnit = "by"
+ countUnit = "1"
+ durationUnit = "s"
+)
+
+// Metrics are a collection of metrics used to record all the
+// measurements for the aggregators. Sync metrics are exposed
+// and used by the calling code to record measurements, whereas
+// async instruments (mainly pebble database metrics) are
+// collected via the observer pattern by passing a metrics provider.
+type Metrics struct {
+ // Synchronous metrics used to record aggregation measurements.
+
+ EventsProcessed metric.Float64Counter
+ BytesProcessed metric.Int64Counter
+ MinQueuedDelay metric.Float64Histogram
+ ProcessingLatency metric.Float64Histogram
+ MetricsOverflowed metric.Int64Counter
+
+ // Asynchronous metrics used to get pebble metrics and
+ // record measurements. These are kept unexported as they are
+ // supposed to be updated via the registered callback.
+
+ pebbleFlushes metric.Int64ObservableCounter
+ pebbleFlushedBytes metric.Int64ObservableCounter
+ pebbleCompactions metric.Int64ObservableCounter
+ pebbleIngestedBytes metric.Int64ObservableCounter
+ pebbleCompactedBytesRead metric.Int64ObservableCounter
+ pebbleCompactedBytesWritten metric.Int64ObservableCounter
+ pebbleMemtableTotalSize metric.Int64ObservableGauge
+ pebbleTotalDiskUsage metric.Int64ObservableGauge
+ pebbleReadAmplification metric.Int64ObservableGauge
+ pebbleNumSSTables metric.Int64ObservableGauge
+ pebbleTableReadersMemEstimate metric.Int64ObservableGauge
+ pebblePendingCompaction metric.Int64ObservableGauge
+ pebbleMarkedForCompactionFiles metric.Int64ObservableGauge
+ pebbleKeysTombstones metric.Int64ObservableGauge
+
+ // registration represents the token for the configured callback.
+ registration metric.Registration
+}
+
+type pebbleProvider func() *pebble.Metrics
+
+// NewMetrics returns a new instance of the metrics.
+func NewMetrics(provider pebbleProvider, opts ...Option) (*Metrics, error) {
+ var err error
+ var i Metrics
+
+ cfg := newConfig(opts...)
+ meter := cfg.Meter
+
+ // Aggregator metrics
+ i.EventsProcessed, err = meter.Float64Counter(
+ "events.processed.count",
+ metric.WithDescription("Number of processed APM Events. Dimensions are used to report the outcome"),
+ metric.WithUnit(countUnit),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metric for events processed: %w", err)
+ }
+ i.BytesProcessed, err = meter.Int64Counter(
+ "events.processed.bytes",
+ metric.WithDescription("Number of bytes processed by the aggregators"),
+ metric.WithUnit(bytesUnit),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metric for bytes processed: %w", err)
+ }
+ i.ProcessingLatency, err = meter.Float64Histogram(
+ "events.processed.latency",
+ metric.WithDescription("Records the processing delays, removes expected delays due to aggregation intervals"),
+ metric.WithUnit(durationUnit),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metric for processing delay: %w", err)
+ }
+ i.MinQueuedDelay, err = meter.Float64Histogram(
+ "events.processed.queued-latency",
+ metric.WithDescription("Records total duration for aggregating a batch w.r.t.
its youngest member"), + metric.WithUnit(durationUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for queued delay: %w", err) + } + i.MetricsOverflowed, err = meter.Int64Counter( + "metrics.overflowed.count", + metric.WithDescription( + "Estimated number of metric aggregation keys that resulted in an overflow, per interval and aggregation type", + ), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for metrics overflowed: %w", err) + } + + // Pebble metrics + i.pebbleFlushes, err = meter.Int64ObservableCounter( + "pebble.flushes", + metric.WithDescription("Number of memtable flushes to disk"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for flushes: %w", err) + } + i.pebbleFlushedBytes, err = meter.Int64ObservableCounter( + "pebble.flushed-bytes", + metric.WithDescription("Bytes written during flush"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for flushed bytes: %w", err) + } + i.pebbleCompactions, err = meter.Int64ObservableCounter( + "pebble.compactions", + metric.WithDescription("Number of table compactions"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for compactions: %w", err) + } + i.pebbleIngestedBytes, err = meter.Int64ObservableCounter( + "pebble.ingested-bytes", + metric.WithDescription("Bytes ingested"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for ingested bytes: %w", err) + } + i.pebbleCompactedBytesRead, err = meter.Int64ObservableCounter( + "pebble.compacted-bytes-read", + metric.WithDescription("Bytes read during compaction"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for compacted bytes read: %w", err) + } + i.pebbleCompactedBytesWritten, err = meter.Int64ObservableCounter( + "pebble.compacted-bytes-written", + metric.WithDescription("Bytes written during compaction"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for compacted bytes written: %w", err) + } + i.pebbleMemtableTotalSize, err = meter.Int64ObservableGauge( + "pebble.memtable.total-size", + metric.WithDescription("Current size of memtable in bytes"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for memtable size: %w", err) + } + i.pebbleTotalDiskUsage, err = meter.Int64ObservableGauge( + "pebble.disk.usage", + metric.WithDescription("Total disk usage by pebble, including live and obsolete files"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for total disk usage: %w", err) + } + i.pebbleReadAmplification, err = meter.Int64ObservableGauge( + "pebble.read-amplification", + metric.WithDescription("Current read amplification for the db"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for read amplification: %w", err) + } + i.pebbleNumSSTables, err = meter.Int64ObservableGauge( + "pebble.num-sstables", + metric.WithDescription("Current number of storage engine SSTables"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for count of sstables: %w", err) + } + i.pebbleTableReadersMemEstimate, err = meter.Int64ObservableGauge( + "pebble.table-readers-mem-estimate", + 
metric.WithDescription("Memory used by index and filter blocks"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for table cache readers: %w", err) + } + i.pebblePendingCompaction, err = meter.Int64ObservableGauge( + "pebble.estimated-pending-compaction", + metric.WithDescription("Estimated pending compaction bytes"), + metric.WithUnit(bytesUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for pending compaction: %w", err) + } + i.pebbleMarkedForCompactionFiles, err = meter.Int64ObservableGauge( + "pebble.marked-for-compaction-files", + metric.WithDescription("Count of SSTables marked for compaction"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for compaction marked files: %w", err) + } + i.pebbleKeysTombstones, err = meter.Int64ObservableGauge( + "pebble.keys.tombstone.count", + metric.WithDescription("Approximate count of delete keys across the storage engine"), + metric.WithUnit(countUnit), + ) + if err != nil { + return nil, fmt.Errorf("failed to create metric for tombstones: %w", err) + } + + if err := i.registerCallback(meter, provider); err != nil { + return nil, fmt.Errorf("failed to register callback: %w", err) + } + return &i, nil +} + +// CleanUp unregisters any registered callback for collecting async +// measurements. +func (i *Metrics) CleanUp() error { + if i == nil || i.registration == nil { + return nil + } + if err := i.registration.Unregister(); err != nil { + return fmt.Errorf("failed to unregister callback: %w", err) + } + return nil +} + +func (i *Metrics) registerCallback(meter metric.Meter, provider pebbleProvider) (err error) { + i.registration, err = meter.RegisterCallback(func(ctx context.Context, obs metric.Observer) error { + pm := provider() + obs.ObserveInt64(i.pebbleMemtableTotalSize, int64(pm.MemTable.Size)) + obs.ObserveInt64(i.pebbleTotalDiskUsage, int64(pm.DiskSpaceUsage())) + + obs.ObserveInt64(i.pebbleFlushes, pm.Flush.Count) + obs.ObserveInt64(i.pebbleFlushedBytes, int64(pm.Levels[0].BytesFlushed)) + + obs.ObserveInt64(i.pebbleCompactions, pm.Compact.Count) + obs.ObserveInt64(i.pebblePendingCompaction, int64(pm.Compact.EstimatedDebt)) + obs.ObserveInt64(i.pebbleMarkedForCompactionFiles, int64(pm.Compact.MarkedFiles)) + + obs.ObserveInt64(i.pebbleTableReadersMemEstimate, pm.TableCache.Size) + obs.ObserveInt64(i.pebbleKeysTombstones, int64(pm.Keys.TombstoneCount)) + + lm := pm.Total() + obs.ObserveInt64(i.pebbleNumSSTables, lm.NumFiles) + obs.ObserveInt64(i.pebbleIngestedBytes, int64(lm.BytesIngested)) + obs.ObserveInt64(i.pebbleCompactedBytesRead, int64(lm.BytesRead)) + obs.ObserveInt64(i.pebbleCompactedBytesWritten, int64(lm.BytesCompacted)) + obs.ObserveInt64(i.pebbleReadAmplification, int64(lm.Sublevels)) + return nil + }, + i.pebbleMemtableTotalSize, + i.pebbleTotalDiskUsage, + i.pebbleFlushes, + i.pebbleFlushedBytes, + i.pebbleCompactions, + i.pebbleIngestedBytes, + i.pebbleCompactedBytesRead, + i.pebbleCompactedBytesWritten, + i.pebbleReadAmplification, + i.pebbleNumSSTables, + i.pebbleTableReadersMemEstimate, + i.pebblePendingCompaction, + i.pebbleMarkedForCompactionFiles, + i.pebbleKeysTombstones, + ) + return +} + +// WithSuccess returns an attribute representing a successful event outcome. +func WithSuccess() attribute.KeyValue { + return WithOutcome("success") +} + +// WithFailure returns an attribute representing a failed event outcome. 
+func WithFailure() attribute.KeyValue { + return WithOutcome("failure") +} + +// WithOutcome returns an attribute for event outcome. +func WithOutcome(outcome string) attribute.KeyValue { + return attribute.String("outcome", outcome) +} diff --git a/copy/apm-aggregation/aggregators/internal/telemetry/metrics_test.go b/copy/apm-aggregation/aggregators/internal/telemetry/metrics_test.go new file mode 100644 index 00000000000..f5ba135994f --- /dev/null +++ b/copy/apm-aggregation/aggregators/internal/telemetry/metrics_test.go @@ -0,0 +1,193 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package telemetry + +import ( + "context" + "testing" + + "github.com/cockroachdb/pebble" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" +) + +func TestNewInstruments(t *testing.T) { + expected := []metricdata.Metrics{ + { + Name: "pebble.flushes", + Description: "Number of memtable flushes to disk", + Unit: "1", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.flushed-bytes", + Description: "Bytes written during flush", + Unit: "by", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.compactions", + Description: "Number of table compactions", + Unit: "1", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.ingested-bytes", + Description: "Bytes ingested", + Unit: "by", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.compacted-bytes-read", + Description: "Bytes read during compaction", + Unit: "by", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.compacted-bytes-written", + Description: "Bytes written during compaction", + Unit: "by", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + { + Name: "pebble.memtable.total-size", + Description: "Current size of memtable in bytes", + Unit: "by", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.disk.usage", + Description: "Total disk usage by pebble, including live and obsolete files", + Unit: "by", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.read-amplification", + Description: "Current read amplification for the db", + Unit: "1", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.num-sstables", + Description: 
"Current number of storage engine SSTables", + Unit: "1", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.table-readers-mem-estimate", + Description: "Memory used by index and filter blocks", + Unit: "by", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.estimated-pending-compaction", + Description: "Estimated pending compaction bytes", + Unit: "by", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.marked-for-compaction-files", + Description: "Count of SSTables marked for compaction", + Unit: "1", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + { + Name: "pebble.keys.tombstone.count", + Description: "Approximate count of delete keys across the storage engine", + Unit: "1", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 0}, + }, + }, + }, + } + + rdr := metric.NewManualReader() + meter := metric.NewMeterProvider(metric.WithReader(rdr)).Meter("test") + instruments, err := NewMetrics( + func() *pebble.Metrics { return &pebble.Metrics{} }, + WithMeter(meter), + ) + + require.NoError(t, err) + require.NotNil(t, instruments) + var rm metricdata.ResourceMetrics + assert.NoError(t, rdr.Collect(context.Background(), &rm)) + + require.Len(t, rm.ScopeMetrics, 1) + sm := rm.ScopeMetrics[0] + require.Len(t, sm.Metrics, len(expected)) + for i, em := range expected { + metricdatatest.AssertEqual(t, em, sm.Metrics[i], metricdatatest.IgnoreTimestamp()) + } +} diff --git a/copy/apm-aggregation/aggregators/logging.go b/copy/apm-aggregation/aggregators/logging.go new file mode 100644 index 00000000000..7247485eb14 --- /dev/null +++ b/copy/apm-aggregation/aggregators/logging.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "go.opentelemetry.io/otel/attribute" + "go.uber.org/zap" +) + +// otelKVsToZapFields converts []attribute.KeyValue to []zap.Field. +// Designed to work with CombinedMetricsIDToKVs for logging. +func otelKVsToZapFields(kvs []attribute.KeyValue) []zap.Field { + if kvs == nil { + return nil + } + fields := make([]zap.Field, len(kvs)) + for i, kv := range kvs { + fields[i] = zap.Any(string(kv.Key), kv.Value.AsInterface()) + } + return fields +} diff --git a/copy/apm-aggregation/aggregators/merger.go b/copy/apm-aggregation/aggregators/merger.go new file mode 100644 index 00000000000..a9ed047c03d --- /dev/null +++ b/copy/apm-aggregation/aggregators/merger.go @@ -0,0 +1,450 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package aggregators + +import ( + "io" + "slices" + "sort" + + "github.com/cespare/xxhash/v2" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/internal/constraint" + "github.com/elastic/apm-aggregation/aggregators/internal/protohash" +) + +type combinedMetricsMerger struct { + limits Limits + constraints constraints + metrics combinedMetrics +} + +func (m *combinedMetricsMerger) MergeNewer(value []byte) error { + from := &aggregationpb.CombinedMetrics{} + if err := from.UnmarshalVT(value); err != nil { + return err + } + m.merge(from) + return nil +} + +func (m *combinedMetricsMerger) MergeOlder(value []byte) error { + from := &aggregationpb.CombinedMetrics{} + if err := from.UnmarshalVT(value); err != nil { + return err + } + m.merge(from) + return nil +} + +func (m *combinedMetricsMerger) Finish(includesBase bool) ([]byte, io.Closer, error) { + pb := m.metrics.ToProto() + data, err := pb.MarshalVT() + return data, nil, err +} + +func (m *combinedMetricsMerger) merge(from *aggregationpb.CombinedMetrics) { + // We merge the below fields irrespective of the services present + // because it is possible for services to be empty if the event + // does not fit the criteria for aggregations. + m.metrics.EventsTotal += from.EventsTotal + if m.metrics.YoungestEventTimestamp < from.YoungestEventTimestamp { + m.metrics.YoungestEventTimestamp = from.YoungestEventTimestamp + } + // If there is overflow due to max services in either of the buckets being + // merged then we can merge the overflow buckets without considering any + // other scenarios. + if len(from.OverflowServicesEstimator) > 0 { + mergeOverflow(&m.metrics.OverflowServices, from.OverflowServices) + mergeEstimator( + &m.metrics.OverflowServicesEstimator, + hllSketch(from.OverflowServicesEstimator), + ) + } + + if len(from.ServiceMetrics) == 0 { + return + } + if m.metrics.Services == nil { + m.metrics.Services = make(map[serviceAggregationKey]serviceMetrics) + } + + // Iterate over the services in the _from_ combined metrics and merge them + // into the _to_ combined metrics as per the following rules: + // 1. If the service in the _from_ bucket is also present in the _to_ + // bucket then merge them. + // 2. If the service in the _from_ bucket is not in the _to_ bucket: + // 2.a. If the _to_ bucket hasn't breached the max services limit then + // create a new service in _to_ bucket and merge. + // 2.b. Else, merge the _from_ bucket to the overflow service bucket + // of the _to_ combined metrics. 
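+	//
+	// For example, with MaxServices set to 1 and _to_ already tracking
+	// svc1, an incoming svc2 in _from_ falls under rule 2.b: its metrics
+	// are merged into OverflowServices and its service key hash is
+	// inserted into OverflowServicesEstimator.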
+ for i := range from.ServiceMetrics { + fromSvc := from.ServiceMetrics[i] + serviceKeyHash := protohash.HashServiceAggregationKey(xxhash.Digest{}, fromSvc.Key) + var sk serviceAggregationKey + sk.FromProto(fromSvc.Key) + toSvc, svcOverflow := getServiceMetrics(&m.metrics, sk, m.limits.MaxServices) + if svcOverflow { + mergeOverflow(&m.metrics.OverflowServices, fromSvc.Metrics.OverflowGroups) + mergeToOverflowFromServiceMetrics(&m.metrics.OverflowServices, fromSvc.Metrics, serviceKeyHash) + insertHash(&m.metrics.OverflowServicesEstimator, serviceKeyHash.Sum64()) + continue + } + if fromSvc.Metrics != nil { + mergeOverflow(&toSvc.OverflowGroups, fromSvc.Metrics.OverflowGroups) + mergeTransactionGroups( + toSvc.TransactionGroups, + fromSvc.Metrics.TransactionMetrics, + constraint.New( + len(toSvc.TransactionGroups), + m.limits.MaxTransactionGroupsPerService, + ), + m.constraints.totalTransactionGroups, + serviceKeyHash, + &toSvc.OverflowGroups.OverflowTransaction, + ) + mergeServiceTransactionGroups( + toSvc.ServiceTransactionGroups, + fromSvc.Metrics.ServiceTransactionMetrics, + constraint.New( + len(toSvc.ServiceTransactionGroups), + m.limits.MaxServiceTransactionGroupsPerService, + ), + m.constraints.totalServiceTransactionGroups, + serviceKeyHash, + &toSvc.OverflowGroups.OverflowServiceTransaction, + ) + mergeSpanGroups( + toSvc.SpanGroups, + fromSvc.Metrics.SpanMetrics, + constraint.New( + len(toSvc.SpanGroups), + m.limits.MaxSpanGroupsPerService, + ), + m.constraints.totalSpanGroups, + serviceKeyHash, + &toSvc.OverflowGroups.OverflowSpan, + ) + } + m.metrics.Services[sk] = toSvc + } +} + +// mergeTransactionGroups merges transaction aggregation groups for two combined metrics +// considering max transaction groups and max transaction groups per service limits. +func mergeTransactionGroups( + to map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics, + from []*aggregationpb.KeyedTransactionMetrics, + perSvcConstraint, globalConstraint *constraint.Constraint, + hash xxhash.Digest, + overflowTo *overflowTransaction, +) { + for i := range from { + fromTxn := from[i] + var tk transactionAggregationKey + tk.FromProto(fromTxn.Key) + toTxn, ok := to[tk] + if !ok { + overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() + if overflowed { + fromTxnKeyHash := protohash.HashTransactionAggregationKey(hash, fromTxn.Key) + overflowTo.Merge(fromTxn.Metrics, fromTxnKeyHash.Sum64()) + continue + } + perSvcConstraint.Add(1) + globalConstraint.Add(1) + + to[tk] = fromTxn.CloneVT() + continue + } + mergeKeyedTransactionMetrics(toTxn, fromTxn) + } +} + +// mergeServiceTransactionGroups merges service transaction aggregation groups for two +// combined metrics considering max service transaction groups and max service +// transaction groups per service limits. 
+func mergeServiceTransactionGroups( + to map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics, + from []*aggregationpb.KeyedServiceTransactionMetrics, + perSvcConstraint, globalConstraint *constraint.Constraint, + hash xxhash.Digest, + overflowTo *overflowServiceTransaction, +) { + for i := range from { + fromSvcTxn := from[i] + var stk serviceTransactionAggregationKey + stk.FromProto(fromSvcTxn.Key) + toSvcTxn, ok := to[stk] + if !ok { + overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() + if overflowed { + fromSvcTxnKeyHash := protohash.HashServiceTransactionAggregationKey(hash, fromSvcTxn.Key) + overflowTo.Merge(fromSvcTxn.Metrics, fromSvcTxnKeyHash.Sum64()) + continue + } + perSvcConstraint.Add(1) + globalConstraint.Add(1) + + to[stk] = fromSvcTxn.CloneVT() + continue + } + mergeKeyedServiceTransactionMetrics(toSvcTxn, fromSvcTxn) + } +} + +// mergeSpanGroups merges span aggregation groups for two combined metrics considering +// max span groups and max span groups per service limits. +func mergeSpanGroups( + to map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics, + from []*aggregationpb.KeyedSpanMetrics, + perSvcConstraint, globalConstraint *constraint.Constraint, + hash xxhash.Digest, + overflowTo *overflowSpan, +) { + for i := range from { + fromSpan := from[i] + var spk spanAggregationKey + spk.FromProto(fromSpan.Key) + toSpan, ok := to[spk] + if !ok { + // Protect against agents that send high cardinality span names by dropping + // span.name if more than half of the per svc span group limit is reached. + originalSpanName := fromSpan.Key.SpanName + half := perSvcConstraint.Limit() / 2 + if perSvcConstraint.Value() >= half { + spk.SpanName = "" + fromSpan.Key.SpanName = "" + toSpan, ok = to[spk] + } + if !ok { + overflowed := perSvcConstraint.Maxed() || globalConstraint.Maxed() + if overflowed { + // Restore span name in case it was dropped above, + // for cardinality estimation. 
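+					// The overflow estimator counts distinct keys as sent
+					// by the agent, so the hash must cover the original,
+					// un-truncated span name.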
+					fromSpan.Key.SpanName = originalSpanName
+					fromSpanKeyHash := protohash.HashSpanAggregationKey(hash, fromSpan.Key)
+					overflowTo.Merge(fromSpan.Metrics, fromSpanKeyHash.Sum64())
+					continue
+				}
+				perSvcConstraint.Add(1)
+				globalConstraint.Add(1)
+
+				to[spk] = fromSpan.CloneVT()
+				continue
+			}
+		}
+		mergeKeyedSpanMetrics(toSpan, fromSpan)
+	}
+}
+
+func mergeToOverflowFromServiceMetrics(
+	to *overflow,
+	from *aggregationpb.ServiceMetrics,
+	hash xxhash.Digest,
+) {
+	if from == nil {
+		return
+	}
+	for _, ktm := range from.TransactionMetrics {
+		ktmKeyHash := protohash.HashTransactionAggregationKey(hash, ktm.Key)
+		to.OverflowTransaction.Merge(ktm.Metrics, ktmKeyHash.Sum64())
+	}
+	for _, kstm := range from.ServiceTransactionMetrics {
+		kstmKeyHash := protohash.HashServiceTransactionAggregationKey(hash, kstm.Key)
+		to.OverflowServiceTransaction.Merge(kstm.Metrics, kstmKeyHash.Sum64())
+	}
+	for _, ksm := range from.SpanMetrics {
+		ksmKeyHash := protohash.HashSpanAggregationKey(hash, ksm.Key)
+		to.OverflowSpan.Merge(ksm.Metrics, ksmKeyHash.Sum64())
+	}
+}
+
+func mergeOverflow(
+	to *overflow,
+	fromproto *aggregationpb.Overflow,
+) {
+	if fromproto == nil {
+		return
+	}
+	var from overflow
+	from.FromProto(fromproto)
+	to.OverflowTransaction.MergeOverflow(&from.OverflowTransaction)
+	to.OverflowServiceTransaction.MergeOverflow(&from.OverflowServiceTransaction)
+	to.OverflowSpan.MergeOverflow(&from.OverflowSpan)
+}
+
+func mergeKeyedTransactionMetrics(
+	to, from *aggregationpb.KeyedTransactionMetrics,
+) {
+	if from.Metrics == nil {
+		return
+	}
+	if to.Metrics == nil {
+		to.Metrics = &aggregationpb.TransactionMetrics{}
+	}
+	mergeTransactionMetrics(to.Metrics, from.Metrics)
+}
+
+func mergeTransactionMetrics(
+	to, from *aggregationpb.TransactionMetrics,
+) {
+	if to.Histogram == nil && from.Histogram != nil {
+		to.Histogram = &aggregationpb.HDRHistogram{}
+	}
+	if to.Histogram != nil && from.Histogram != nil {
+		mergeHistogram(to.Histogram, from.Histogram)
+	}
+}
+
+func mergeKeyedServiceTransactionMetrics(
+	to, from *aggregationpb.KeyedServiceTransactionMetrics,
+) {
+	if from.Metrics == nil {
+		return
+	}
+	if to.Metrics == nil {
+		to.Metrics = &aggregationpb.ServiceTransactionMetrics{}
+	}
+	mergeServiceTransactionMetrics(to.Metrics, from.Metrics)
+}
+
+func mergeServiceTransactionMetrics(
+	to, from *aggregationpb.ServiceTransactionMetrics,
+) {
+	if to.Histogram == nil && from.Histogram != nil {
+		to.Histogram = &aggregationpb.HDRHistogram{}
+	}
+	if to.Histogram != nil && from.Histogram != nil {
+		mergeHistogram(to.Histogram, from.Histogram)
+	}
+	to.FailureCount += from.FailureCount
+	to.SuccessCount += from.SuccessCount
+}
+
+func mergeKeyedSpanMetrics(to, from *aggregationpb.KeyedSpanMetrics) {
+	if from.Metrics == nil {
+		return
+	}
+	if to.Metrics == nil {
+		to.Metrics = &aggregationpb.SpanMetrics{}
+	}
+	mergeSpanMetrics(to.Metrics, from.Metrics)
+}
+
+func mergeSpanMetrics(to, from *aggregationpb.SpanMetrics) {
+	to.Count += from.Count
+	to.Sum += from.Sum
+}
+
+// mergeHistogram merges two proto representations of HDRHistogram. The
+// merge assumes both histograms are created with the same arguments and
+// their representations are sorted by bucket.
+func mergeHistogram(to, from *aggregationpb.HDRHistogram) {
+	if len(from.Buckets) == 0 {
+		return
+	}
+
+	if len(to.Buckets) == 0 {
+		to.Buckets = append(to.Buckets, from.Buckets...)
+		to.Counts = append(to.Counts, from.Counts...)
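+		// `to` had no buckets of its own, so the merged histogram is
+		// simply a copy of `from`.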
+		return
+	}
+
+	startToIdx, found := sort.Find(len(to.Buckets), func(i int) int {
+		return int(from.Buckets[0] - to.Buckets[i])
+	})
+	if found && len(from.Buckets) == 1 {
+		// Optimize for a single value of `from` that is also present in `to`.
+		to.Counts[startToIdx] += from.Counts[0]
+		return
+	}
+
+	// Since all subsequent values of `from` are greater than its first value,
+	// we can limit the search space in `to` to [startToIdx, len(to.Buckets)).
+	requiredLen := len(to.Buckets) + len(from.Buckets)
+	for toIdx, fromIdx := startToIdx, 0; toIdx < len(to.Buckets) && fromIdx < len(from.Buckets); {
+		v := to.Buckets[toIdx] - from.Buckets[fromIdx]
+		switch {
+		case v == 0:
+			// For every bucket that is common, we need one less bucket in the final slice.
+			requiredLen--
+			toIdx++
+			fromIdx++
+		case v < 0:
+			toIdx++
+		case v > 0:
+			fromIdx++
+		}
+	}
+
+	// Merge in place from the back so that entries in `to` are moved to
+	// their final position before they can be overwritten.
+	toIdx, fromIdx := len(to.Buckets)-1, len(from.Buckets)-1
+	to.Buckets = slices.Grow(to.Buckets, requiredLen-len(to.Buckets))[:requiredLen]
+	to.Counts = slices.Grow(to.Counts, requiredLen-len(to.Counts))[:requiredLen]
+	for idx := len(to.Buckets) - 1; idx >= 0; idx-- {
+		if fromIdx < 0 {
+			break
+		}
+		if toIdx < startToIdx {
+			copy(to.Counts[startToIdx:idx+1], from.Counts[0:fromIdx+1])
+			copy(to.Buckets[startToIdx:idx+1], from.Buckets[0:fromIdx+1])
+			break
+		}
+		v := to.Buckets[toIdx] - from.Buckets[fromIdx]
+		switch {
+		case v == 0:
+			to.Counts[idx] = to.Counts[toIdx] + from.Counts[fromIdx]
+			to.Buckets[idx] = to.Buckets[toIdx]
+			toIdx--
+			fromIdx--
+		case v > 0:
+			to.Counts[idx] = to.Counts[toIdx]
+			to.Buckets[idx] = to.Buckets[toIdx]
+			toIdx--
+		case v < 0:
+			to.Counts[idx] = from.Counts[fromIdx]
+			to.Buckets[idx] = from.Buckets[fromIdx]
+			fromIdx--
+		}
+	}
+}
+
+// getServiceMetrics returns the service metrics for a combined metrics based
+// on the service key argument, creating them if needed. The second return
+// value is true when a new service cannot be created because the max services
+// limit has been breached.
+func getServiceMetrics(cm *combinedMetrics, svcKey serviceAggregationKey, maxSvcs int) (serviceMetrics, bool) {
+	srcSvc, ok := cm.Services[svcKey]
+	if !ok {
+		if len(cm.Services) < maxSvcs {
+			return newServiceMetrics(), false
+		}
+		return serviceMetrics{}, true
+	}
+	return srcSvc, false
+}
+
+func newServiceMetrics() serviceMetrics {
+	return serviceMetrics{
+		TransactionGroups:        make(map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics),
+		ServiceTransactionGroups: make(map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics),
+		SpanGroups:               make(map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics),
+	}
+}
+
+// constraints is a group of constraints to be observed during merge operations.
+type constraints struct {
+	totalTransactionGroups        *constraint.Constraint
+	totalServiceTransactionGroups *constraint.Constraint
+	totalSpanGroups               *constraint.Constraint
+}
+
+func newConstraints(limits Limits) constraints {
+	return constraints{
+		totalTransactionGroups:        constraint.New(0, limits.MaxTransactionGroups),
+		totalServiceTransactionGroups: constraint.New(0, limits.MaxServiceTransactionGroups),
+		totalSpanGroups:               constraint.New(0, limits.MaxSpanGroups),
+	}
+}
diff --git a/copy/apm-aggregation/aggregators/merger_test.go b/copy/apm-aggregation/aggregators/merger_test.go
new file mode 100644
index 00000000000..43a2de031e8
--- /dev/null
+++ b/copy/apm-aggregation/aggregators/merger_test.go
@@ -0,0 +1,935 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "math/rand" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/testing/protocmp" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/internal/hdrhistogram" +) + +func TestMerge(t *testing.T) { + ts := time.Unix(0, 0).UTC() + for _, tc := range []struct { + name string + limits Limits + to func() combinedMetrics + from func() *aggregationpb.CombinedMetrics + expected func() combinedMetrics + }{ + { + name: "no_overflow_with_count_values", + limits: Limits{ + MaxSpanGroups: 2, + MaxSpanGroupsPerService: 2, + MaxTransactionGroups: 2, + MaxTransactionGroupsPerService: 2, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 2, + MaxServices: 2, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(10)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(5)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(4)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(2)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(2)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(2)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(14)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + }, + { + name: "no_overflow_with_histograms_in_to", + limits: Limits{ + MaxSpanGroups: 2, + MaxSpanGroupsPerService: 2, + MaxTransactionGroups: 2, + MaxTransactionGroupsPerService: 2, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 2, + MaxServices: 2, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1000)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(500)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(500)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(500)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(4)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). 
+ AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(2)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(2)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(2)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1004)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(502)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(502)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(502)). + Get() + }, + }, + { + name: "no_overflow_with_histogram_in_from", + limits: Limits{ + MaxSpanGroups: 2, + MaxSpanGroupsPerService: 2, + MaxTransactionGroups: 2, + MaxTransactionGroupsPerService: 2, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 2, + MaxServices: 2, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(4)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(2)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(2)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(2)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1000)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(500)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(500)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(500)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1004)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(502)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(502)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(502)). + Get() + }, + }, + { + name: "no_overflow_with_histogram_in_both", + limits: Limits{ + MaxSpanGroups: 2, + MaxSpanGroupsPerService: 2, + MaxTransactionGroups: 2, + MaxTransactionGroupsPerService: 2, + MaxServiceTransactionGroups: 2, + MaxServiceTransactionGroupsPerService: 2, + MaxServices: 2, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1400)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(700)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(700)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(700)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1000)). 
+ AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(500)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(500)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(500)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(2400)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(1200)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(1200)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(1200)). + Get() + }, + }, + { + name: "per_svc_overflow_due_to_merge", + limits: Limits{ + MaxSpanGroups: 100, + MaxSpanGroupsPerService: 1, + MaxTransactionGroups: 100, + MaxTransactionGroupsPerService: 1, + MaxServiceTransactionGroups: 100, + MaxServiceTransactionGroupsPerService: 1, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(14)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(10)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(24)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + // no merge as span, transaction, and service transaction will overflow + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + // all span, transaction, and service transaction from _from_ will overflow + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + Get() + }, + }, + { + name: "global_overflow_due_to_merge", + limits: Limits{ + MaxSpanGroups: 1, + MaxSpanGroupsPerService: 100, + MaxTransactionGroups: 1, + MaxTransactionGroupsPerService: 100, + MaxServiceTransactionGroups: 1, + MaxServiceTransactionGroupsPerService: 100, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(14)). 
+ AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(10)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(24)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + // no merge as span, transaction, and service transaction will overflow + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + // all span, transaction, and service transaction from _from_ will overflow + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + Get() + }, + }, + { + name: "to_overflowed_before_merge", + limits: Limits{ + MaxSpanGroups: 1, + MaxSpanGroupsPerService: 1, + MaxTransactionGroups: 1, + MaxTransactionGroupsPerService: 1, + MaxServiceTransactionGroups: 1, + MaxServiceTransactionGroupsPerService: 1, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(34)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(10)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(10)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(10)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(10)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). 
+ GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(44)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(15)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(15), + ). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(15), + ). + Get() + }, + }, + { + name: "from_overflowed_before_merge", + limits: Limits{ + MaxSpanGroups: 1, + MaxSpanGroupsPerService: 1, + MaxTransactionGroups: 1, + MaxTransactionGroupsPerService: 1, + MaxServiceTransactionGroups: 1, + MaxServiceTransactionGroupsPerService: 1, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(14)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(26)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + AddSpanOverflow(spanAggregationKey{SpanName: "span3"}, WithSpanCount(8)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type3"}, + WithTransactionCount(8)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn3", TransactionType: "type3"}, + WithTransactionCount(8)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(40)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddSpanOverflow(spanAggregationKey{SpanName: "span3"}, WithSpanCount(8)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type3"}, + WithTransactionCount(8)). 
+					AddTransactionOverflow(
+						transactionAggregationKey{TransactionName: "txn3", TransactionType: "type3"},
+						WithTransactionCount(8)).
+					Get()
+			},
+		},
+		{
+			name: "svc_overflow",
+			limits: Limits{
+				MaxSpanGroups:                         1,
+				MaxSpanGroupsPerService:               1,
+				MaxTransactionGroups:                  1,
+				MaxTransactionGroupsPerService:        1,
+				MaxServiceTransactionGroups:           1,
+				MaxServiceTransactionGroupsPerService: 1,
+				MaxServices:                           1,
+			},
+			to: func() combinedMetrics {
+				return NewTestCombinedMetrics(WithEventsTotal(14)).
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}).
+					AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)).
+					AddServiceTransaction(
+						serviceTransactionAggregationKey{TransactionType: "type1"},
+						WithTransactionCount(7)).
+					AddTransaction(
+						transactionAggregationKey{
+							TransactionName: "txn1",
+							TransactionType: "type1",
+						}, WithTransactionCount(7)).
+					Get()
+			},
+			from: func() *aggregationpb.CombinedMetrics {
+				return NewTestCombinedMetrics(WithEventsTotal(10)).
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc2"}).
+					AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(5)).
+					AddServiceTransaction(
+						serviceTransactionAggregationKey{TransactionType: "type1"},
+						WithTransactionCount(5)).
+					AddTransaction(
+						transactionAggregationKey{
+							TransactionName: "txn1",
+							TransactionType: "type1",
+						}, WithTransactionCount(5)).
+					GetProto()
+			},
+			expected: func() combinedMetrics {
+				tcm := NewTestCombinedMetrics(WithEventsTotal(24))
+				tcm.
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}).
+					AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)).
+					AddServiceTransaction(
+						serviceTransactionAggregationKey{TransactionType: "type1"},
+						WithTransactionCount(7)).
+					AddTransaction(
+						transactionAggregationKey{
+							TransactionName: "txn1",
+							TransactionType: "type1",
+						}, WithTransactionCount(7))
+				// svc2 overflows
+				tcm.
+					AddServiceMetricsOverflow(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc2"}).
+					AddTransactionOverflow(
+						transactionAggregationKey{
+							TransactionName: "txn1",
+							TransactionType: "type1",
+						}, WithTransactionCount(5)).
+					AddServiceTransactionOverflow(
+						serviceTransactionAggregationKey{TransactionType: "type1"},
+						WithTransactionCount(5)).
+					AddSpanOverflow(
+						spanAggregationKey{SpanName: "span1"}, WithSpanCount(5))
+				return tcm.Get()
+			},
+		},
+		{
+			name: "svc_overflow_only",
+			limits: Limits{
+				MaxSpanGroups:                         1,
+				MaxSpanGroupsPerService:               1,
+				MaxTransactionGroups:                  1,
+				MaxTransactionGroupsPerService:        1,
+				MaxServiceTransactionGroups:           1,
+				MaxServiceTransactionGroupsPerService: 1,
+				MaxServices:                           1,
+			},
+			to: func() combinedMetrics {
+				return NewTestCombinedMetrics(WithEventsTotal(111)).
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}).
+					Get()
+			},
+			from: func() *aggregationpb.CombinedMetrics {
+				return NewTestCombinedMetrics(WithEventsTotal(222)).
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc2"}).
+					GetProto()
+			},
+			expected: func() combinedMetrics {
+				tcm := NewTestCombinedMetrics(WithEventsTotal(333))
+				tcm.
+					AddServiceMetrics(
+						serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"})
+				tcm.
+ AddServiceMetricsOverflow( + serviceAggregationKey{Timestamp: ts, ServiceName: "svc2"}) + return tcm.Get() + }, + }, + { + name: "per_svc_overflow_known_svc", + limits: Limits{ + MaxSpanGroups: 100, + MaxSpanGroupsPerService: 1, + MaxTransactionGroups: 100, + MaxTransactionGroupsPerService: 1, + MaxServiceTransactionGroups: 100, + MaxServiceTransactionGroupsPerService: 1, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(14)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(10)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(24)). + AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddSpan(spanAggregationKey{SpanName: "span1"}, WithSpanCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + AddTransaction( + transactionAggregationKey{TransactionName: "txn1", TransactionType: "type1"}, + WithTransactionCount(7)). + AddSpanOverflow(spanAggregationKey{SpanName: "span2"}, WithSpanCount(5)). + AddServiceTransactionOverflow( + serviceTransactionAggregationKey{TransactionType: "type2"}, + WithTransactionCount(5)). + AddTransactionOverflow( + transactionAggregationKey{TransactionName: "txn2", TransactionType: "type2"}, + WithTransactionCount(5)). + Get() + }, + }, + { + name: "merge_with_empty_combined_metrics", + limits: Limits{ + MaxSpanGroups: 100, + MaxSpanGroupsPerService: 10, + MaxTransactionGroups: 100, + MaxTransactionGroupsPerService: 1, + MaxServiceTransactionGroups: 100, + MaxServiceTransactionGroupsPerService: 1, + MaxServices: 1, + }, + to: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(7)). + AddServiceMetrics( + serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddTransaction( + transactionAggregationKey{ + TransactionName: "txn1", + TransactionType: "type1", + }, WithTransactionCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). + Get() + }, + from: func() *aggregationpb.CombinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(1)).GetProto() + }, + expected: func() combinedMetrics { + return NewTestCombinedMetrics(WithEventsTotal(8)). + AddServiceMetrics( + serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}). + AddTransaction( + transactionAggregationKey{ + TransactionName: "txn1", + TransactionType: "type1", + }, WithTransactionCount(7)). + AddServiceTransaction( + serviceTransactionAggregationKey{TransactionType: "type1"}, + WithTransactionCount(7)). 
+					Get()
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			// Since we start with existing metrics in combinedMetricsMerger,
+			// we have to make sure the constraints struct contains the correct counts.
+			metrics := tc.to()
+			constraints := newConstraints(tc.limits)
+			for _, svc := range metrics.Services {
+				constraints.totalTransactionGroups.Add(len(svc.TransactionGroups))
+				constraints.totalServiceTransactionGroups.Add(len(svc.ServiceTransactionGroups))
+				constraints.totalSpanGroups.Add(len(svc.SpanGroups))
+			}
+			cmm := combinedMetricsMerger{
+				limits:      tc.limits,
+				constraints: constraints,
+				metrics:     metrics,
+			}
+			cmm.merge(tc.from())
+			assert.Empty(t, cmp.Diff(
+				tc.expected(), cmm.metrics,
+				protocmp.Transform(),
+				cmp.Exporter(func(reflect.Type) bool { return true }),
+			))
+		})
+	}
+}
+
+func TestCardinalityEstimationOnSubKeyCollision(t *testing.T) {
+	limits := Limits{
+		MaxSpanGroups:                         100,
+		MaxSpanGroupsPerService:               100,
+		MaxTransactionGroups:                  100,
+		MaxTransactionGroupsPerService:        100,
+		MaxServiceTransactionGroups:           100,
+		MaxServiceTransactionGroupsPerService: 100,
+		MaxServices:                           1,
+	}
+	ts := time.Time{}
+	to := NewTestCombinedMetrics(WithEventsTotal(0)).
+		AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc1"}).
+		Get()
+	from1 := NewTestCombinedMetrics(WithEventsTotal(10)).
+		AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc2"}).
+		AddSpan(spanAggregationKey{}, WithSpanCount(5)).
+		AddTransaction(transactionAggregationKey{
+			TransactionName: "txn1",
+			TransactionType: "type1",
+		}, WithTransactionCount(5)).
+		AddServiceTransaction(serviceTransactionAggregationKey{
+			TransactionType: "type1",
+		}, WithTransactionCount(5)).
+		GetProto()
+	from2 := NewTestCombinedMetrics(WithEventsTotal(10)).
+		AddServiceMetrics(serviceAggregationKey{Timestamp: ts, ServiceName: "svc3"}).
+		AddSpan(spanAggregationKey{}, WithSpanCount(5)).
+		AddTransaction(transactionAggregationKey{
+			TransactionName: "txn1",
+			TransactionType: "type1",
+		}, WithTransactionCount(5)).
+		AddServiceTransaction(serviceTransactionAggregationKey{
+			TransactionType: "type1",
+		}, WithTransactionCount(5)).
+ GetProto() + cmm := combinedMetricsMerger{ + limits: limits, + metrics: to, + } + cmm.merge(from1) + cmm.merge(from2) + assert.Equal(t, uint64(2), cmm.metrics.OverflowServices.OverflowTransaction.Estimator.Estimate()) + assert.Equal(t, uint64(2), cmm.metrics.OverflowServices.OverflowServiceTransaction.Estimator.Estimate()) + assert.Equal(t, uint64(2), cmm.metrics.OverflowServices.OverflowSpan.Estimator.Estimate()) +} + +func TestMergeHistogramEquiv(t *testing.T) { + for _, tc := range []struct { + name string + recordFunc func(h1, h2 *hdrhistogram.HistogramRepresentation) + }{ + { + name: "zero_values", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + h1.RecordValues(0, 0) + h2.RecordValues(0, 0) + }, + }, + { + name: "random_only_to", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + for i := 0; i < 1_000_000; i++ { + v := rand.Int63n(3_600_000_000) + c := rand.Int63n(1_000) + h1.RecordValues(v, c) + } + }, + }, + { + name: "random_only_from", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + for i := 0; i < 1_000_000; i++ { + v := rand.Int63n(3_600_000_000) + c := rand.Int63n(1_000) + h2.RecordValues(v, c) + } + }, + }, + { + name: "random_both", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + for i := 0; i < 1_000_000; i++ { + v1, v2 := rand.Int63n(3_600_000_000), rand.Int63n(3_600_000_000) + c1, c2 := rand.Int63n(1_000), rand.Int63n(1_000) + h1.RecordValues(v1, c1) + h2.RecordValues(v2, c2) + } + }, + }, + // There may be special fast paths for single value from, + // and since we get them quite often, + // we have the following test cases for it. + { + name: "random_to_single_value_from_hit", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + var v, c int64 + for i := 0; i < 1_000_000; i++ { + v = rand.Int63n(3_600_000_000) + c = rand.Int63n(1_000) + h1.RecordValues(v, c) + } + c = rand.Int63n(1_000) + h2.RecordValues(v, c) + }, + }, + { + name: "random_to_single_value_from_miss", + recordFunc: func(h1, h2 *hdrhistogram.HistogramRepresentation) { + for i := 0; i < 1_000_000; i++ { + v := rand.Int63n(3_600_000_000) + c := rand.Int63n(1_000) + h1.RecordValues(v, c) + + } + v := rand.Int63n(3_600_000_000) + c := rand.Int63n(1_000) + h2.RecordValues(v, c) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Test assumes histogram representation Merge is correct + hist1, hist2 := hdrhistogram.New(), hdrhistogram.New() + + tc.recordFunc(hist1, hist2) + histproto1, histproto2 := histogramToProto(hist1), histogramToProto(hist2) + hist1.Merge(hist2) + mergeHistogram(histproto1, histproto2) + histActual := hdrhistogram.New() + histogramFromProto(histActual, histproto1) + + assert.Empty(t, cmp.Diff( + hist1, + histActual, + cmp.AllowUnexported(hdrhistogram.HistogramRepresentation{}), + cmp.AllowUnexported(hdrhistogram.HybridCountsRep{}), + )) + }) + } +} + +func TestMergeHistogram(t *testing.T) { + for _, tc := range []struct { + name string + to *aggregationpb.HDRHistogram + from *aggregationpb.HDRHistogram + expected *aggregationpb.HDRHistogram + }{ + { + name: "non_single_value", + to: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 3, 5, 7, 9}, + Counts: []int64{1, 3, 5, 7, 9}, + }, + from: &aggregationpb.HDRHistogram{ + Buckets: []int32{2, 4, 5, 8}, + Counts: []int64{2, 4, 5, 8}, + }, + expected: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 2, 3, 4, 5, 7, 8, 9}, + Counts: []int64{1, 2, 3, 4, 10, 7, 8, 9}, + }, + }, + { + name: "empty_to", + to: &aggregationpb.HDRHistogram{ + 
Buckets: []int32{}, + Counts: []int64{}, + }, + from: &aggregationpb.HDRHistogram{ + Buckets: []int32{1}, + Counts: []int64{1}, + }, + expected: &aggregationpb.HDRHistogram{ + Buckets: []int32{1}, + Counts: []int64{1}, + }, + }, + { + name: "empty_from", + to: &aggregationpb.HDRHistogram{ + Buckets: []int32{1}, + Counts: []int64{1}, + }, + from: &aggregationpb.HDRHistogram{ + Buckets: []int32{}, + Counts: []int64{}, + }, + expected: &aggregationpb.HDRHistogram{ + Buckets: []int32{1}, + Counts: []int64{1}, + }, + }, + { + name: "single_value_from_hit", + to: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 2, 3}, + Counts: []int64{1, 2, 3}, + }, + from: &aggregationpb.HDRHistogram{ + Buckets: []int32{3}, + Counts: []int64{4}, + }, + expected: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 2, 3}, + Counts: []int64{1, 2, 7}, + }, + }, + { + name: "single_value_from_miss", + to: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 2, 4}, + Counts: []int64{1, 2, 4}, + }, + from: &aggregationpb.HDRHistogram{ + Buckets: []int32{3}, + Counts: []int64{3}, + }, + expected: &aggregationpb.HDRHistogram{ + Buckets: []int32{1, 2, 3, 4}, + Counts: []int64{1, 2, 3, 4}, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + mergeHistogram(tc.to, tc.from) + + assert.Empty(t, cmp.Diff( + tc.expected, + tc.to, + cmpopts.IgnoreUnexported(aggregationpb.HDRHistogram{}), + )) + }) + } +} diff --git a/copy/apm-aggregation/aggregators/models.go b/copy/apm-aggregation/aggregators/models.go new file mode 100644 index 00000000000..1ee21c7f2f6 --- /dev/null +++ b/copy/apm-aggregation/aggregators/models.go @@ -0,0 +1,299 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "time" + + "github.com/axiomhq/hyperloglog" + + "github.com/elastic/apm-aggregation/aggregationpb" + "github.com/elastic/apm-aggregation/aggregators/nullable" + "github.com/elastic/apm-data/model/modelpb" +) + +// Limits define the aggregation limits. Once the limits are reached +// the metrics will overflow into dedicated overflow buckets. +type Limits struct { + // MaxServices is the limit on the total number of unique services. + // A unique service is identified by a unique ServiceAggregationKey. + // This limit is shared across all aggregation metrics. + MaxServices int + + // MaxSpanGroups is the limit on total number of unique span groups + // across all services. + // A unique span group is identified by a unique + // ServiceAggregationKey + SpanAggregationKey. + MaxSpanGroups int + + // MaxSpanGroupsPerService is the limit on the total number of unique + // span groups within a service. + // A unique span group within a service is identified by a unique + // SpanAggregationKey. + MaxSpanGroupsPerService int + + // MaxTransactionGroups is the limit on total number of unique + // transaction groups across all services. + // A unique transaction group is identified by a unique + // ServiceAggregationKey + TransactionAggregationKey. + MaxTransactionGroups int + + // MaxTransactionGroupsPerService is the limit on the number of unique + // transaction groups within a service. + // A unique transaction group within a service is identified by a unique + // TransactionAggregationKey. 
+	MaxTransactionGroupsPerService int
+
+	// MaxServiceTransactionGroups is the limit on the total number of unique
+	// service transaction groups across all services.
+	// A unique service transaction group is identified by a unique
+	// ServiceAggregationKey + ServiceTransactionAggregationKey.
+	MaxServiceTransactionGroups int
+
+	// MaxServiceTransactionGroupsPerService is the limit on the number
+	// of unique service transaction groups within a service.
+	// A unique service transaction group within a service is identified
+	// by a unique ServiceTransactionAggregationKey.
+	MaxServiceTransactionGroupsPerService int
+}
+
+// CombinedMetricsKey models the key used to store the data in the LSM tree.
+// Each key-value pair represents a set of unique metrics for a combined metrics ID.
+// The processing time used in the key should be rounded down to a multiple
+// of the aggregation interval since the zero time.
+type CombinedMetricsKey struct {
+	Interval       time.Duration
+	ProcessingTime time.Time
+	PartitionID    uint16
+	ID             [16]byte
+}
+
+// globalLabels is an intermediate struct used to marshal/unmarshal the
+// provided global labels into a comparable format. The format is used by
+// pebble db to compare service aggregation keys.
+type globalLabels struct {
+	Labels        modelpb.Labels
+	NumericLabels modelpb.NumericLabels
+}
+
+// combinedMetrics models the value used to store the data in the LSM tree.
+// Each unique combined metrics ID stores one combined metrics value per
+// aggregation interval. combinedMetrics encapsulates the aggregated metrics
+// as well as the overflow metrics.
+type combinedMetrics struct {
+	Services map[serviceAggregationKey]serviceMetrics
+
+	// OverflowServices provides a dedicated bucket for collecting
+	// aggregate metrics for all the aggregation groups for all services
+	// that overflowed due to the max services limit being reached.
+	OverflowServices overflow
+
+	// OverflowServicesEstimator estimates the number of unique service
+	// aggregation keys that overflowed due to the max services limit.
+	OverflowServicesEstimator *hyperloglog.Sketch
+
+	// EventsTotal is the total number of individual events, including
+	// all overflows, that were aggregated for this combined metrics. It
+	// is used for internal monitoring purposes and is approximated when
+	// partitioning is enabled.
+	EventsTotal float64
+
+	// YoungestEventTimestamp is the received timestamp of the youngest
+	// event that was aggregated into the combined metrics.
+	YoungestEventTimestamp uint64
+}
+
+// serviceAggregationKey models the key used to store service specific
+// aggregation metrics.
+type serviceAggregationKey struct {
+	Timestamp           time.Time
+	ServiceName         string
+	ServiceEnvironment  string
+	ServiceLanguageName string
+	AgentName           string
+	GlobalLabelsStr     string
+}
+
+// serviceMetrics models the value used to store all the aggregated metrics
+// for a specific service aggregation key.
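+// Lookups into these maps are plain map indexing on comparable key structs.
+// A minimal sketch, assuming sm is a serviceMetrics value and txnKey an
+// already-built (hypothetical) transactionAggregationKey:
+//
+//	if ktm, ok := sm.TransactionGroups[txnKey]; ok {
+//		_ = ktm // merge the incoming metrics into ktm here
+//	}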
+type serviceMetrics struct {
+	OverflowGroups           overflow
+	TransactionGroups        map[transactionAggregationKey]*aggregationpb.KeyedTransactionMetrics
+	ServiceTransactionGroups map[serviceTransactionAggregationKey]*aggregationpb.KeyedServiceTransactionMetrics
+	SpanGroups               map[spanAggregationKey]*aggregationpb.KeyedSpanMetrics
+}
+
+func insertHash(to **hyperloglog.Sketch, hash uint64) {
+	if *to == nil {
+		*to = hyperloglog.New14()
+	}
+	(*to).InsertHash(hash)
+}
+
+func mergeEstimator(to **hyperloglog.Sketch, from *hyperloglog.Sketch) {
+	if *to == nil {
+		*to = hyperloglog.New14()
+	}
+	// Ignoring the returned error here since the error is only returned if
+	// the precision is set out of bounds, which is not possible in our case.
+	(*to).Merge(from)
+}
+
+type overflowTransaction struct {
+	Metrics   *aggregationpb.TransactionMetrics
+	Estimator *hyperloglog.Sketch
+}
+
+func (o *overflowTransaction) Merge(
+	from *aggregationpb.TransactionMetrics,
+	hash uint64,
+) {
+	if o.Metrics == nil {
+		o.Metrics = &aggregationpb.TransactionMetrics{}
+	}
+	mergeTransactionMetrics(o.Metrics, from)
+	insertHash(&o.Estimator, hash)
+}
+
+func (o *overflowTransaction) MergeOverflow(from *overflowTransaction) {
+	if from.Estimator != nil {
+		if o.Metrics == nil {
+			o.Metrics = &aggregationpb.TransactionMetrics{}
+		}
+		mergeTransactionMetrics(o.Metrics, from.Metrics)
+		mergeEstimator(&o.Estimator, from.Estimator)
+	}
+}
+
+func (o *overflowTransaction) Empty() bool {
+	return o.Estimator == nil
+}
+
+type overflowServiceTransaction struct {
+	Metrics   *aggregationpb.ServiceTransactionMetrics
+	Estimator *hyperloglog.Sketch
+}
+
+func (o *overflowServiceTransaction) Merge(
+	from *aggregationpb.ServiceTransactionMetrics,
+	hash uint64,
+) {
+	if o.Metrics == nil {
+		o.Metrics = &aggregationpb.ServiceTransactionMetrics{}
+	}
+	mergeServiceTransactionMetrics(o.Metrics, from)
+	insertHash(&o.Estimator, hash)
+}
+
+func (o *overflowServiceTransaction) MergeOverflow(from *overflowServiceTransaction) {
+	if from.Estimator != nil {
+		if o.Metrics == nil {
+			o.Metrics = &aggregationpb.ServiceTransactionMetrics{}
+		}
+		mergeServiceTransactionMetrics(o.Metrics, from.Metrics)
+		mergeEstimator(&o.Estimator, from.Estimator)
+	}
+}
+
+func (o *overflowServiceTransaction) Empty() bool {
+	return o.Estimator == nil
+}
+
+type overflowSpan struct {
+	Metrics   *aggregationpb.SpanMetrics
+	Estimator *hyperloglog.Sketch
+}
+
+func (o *overflowSpan) Merge(
+	from *aggregationpb.SpanMetrics,
+	hash uint64,
+) {
+	if o.Metrics == nil {
+		o.Metrics = &aggregationpb.SpanMetrics{}
+	}
+	mergeSpanMetrics(o.Metrics, from)
+	insertHash(&o.Estimator, hash)
+}
+
+func (o *overflowSpan) MergeOverflow(from *overflowSpan) {
+	if from.Estimator != nil {
+		if o.Metrics == nil {
+			o.Metrics = &aggregationpb.SpanMetrics{}
+		}
+		mergeSpanMetrics(o.Metrics, from.Metrics)
+		mergeEstimator(&o.Estimator, from.Estimator)
+	}
+}
+
+func (o *overflowSpan) Empty() bool {
+	return o.Estimator == nil
+}
+
+// overflow contains the transaction, service transaction, and span overflow
+// metrics and cardinality estimators for the aggregation group for overflow
+// buckets.
+type overflow struct {
+	OverflowTransaction        overflowTransaction
+	OverflowServiceTransaction overflowServiceTransaction
+	OverflowSpan               overflowSpan
+}
+
+// transactionAggregationKey models the key used to store transaction
+// aggregation metrics.
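+// All of its fields are comparable types, so the struct itself is
+// comparable and can be used directly as a Go map key (as in
+// serviceMetrics.TransactionGroups above).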
+type transactionAggregationKey struct { + TraceRoot bool + + ContainerID string + KubernetesPodName string + + ServiceVersion string + ServiceNodeName string + + ServiceRuntimeName string + ServiceRuntimeVersion string + ServiceLanguageVersion string + + HostHostname string + HostName string + HostOSPlatform string + + EventOutcome string + + TransactionName string + TransactionType string + TransactionResult string + + FAASColdstart nullable.Bool + FAASID string + FAASName string + FAASVersion string + FAASTriggerType string + + CloudProvider string + CloudRegion string + CloudAvailabilityZone string + CloudServiceName string + CloudAccountID string + CloudAccountName string + CloudMachineType string + CloudProjectID string + CloudProjectName string +} + +// spanAggregationKey models the key used to store span aggregation metrics. +type spanAggregationKey struct { + SpanName string + Outcome string + + TargetType string + TargetName string + + Resource string +} + +// serviceTransactionAggregationKey models the key used to store +// service transaction aggregation metrics. +type serviceTransactionAggregationKey struct { + TransactionType string +} diff --git a/copy/apm-aggregation/aggregators/ndjson_bench_test.go b/copy/apm-aggregation/aggregators/ndjson_bench_test.go new file mode 100644 index 00000000000..48b2e64e654 --- /dev/null +++ b/copy/apm-aggregation/aggregators/ndjson_bench_test.go @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package aggregators + +import ( + "bufio" + "context" + "fmt" + "io" + "io/fs" + "math" + "os" + "testing" + "time" + + "go.uber.org/zap" + "golang.org/x/sync/semaphore" + + "github.com/elastic/apm-data/input/elasticapm" + "github.com/elastic/apm-data/model/modelpb" + "github.com/elastic/apm-data/model/modelprocessor" +) + +func ndjsonToBatch(reader io.Reader) (*modelpb.Batch, error) { + logger, err := zap.NewDevelopment() + if err != nil { + return nil, err + } + elasticapmProcessor := elasticapm.NewProcessor(elasticapm.Config{ + Logger: logger, + MaxEventSize: 1024 * 1024, // 1MiB + Semaphore: semaphore.NewWeighted(1), + }) + baseEvent := modelpb.APMEvent{ + Event: &modelpb.Event{ + Received: modelpb.FromTime(time.Now()), + }, + } + var batch modelpb.Batch + processor := modelprocessor.Chained{ + modelprocessor.SetHostHostname{}, + modelprocessor.SetServiceNodeName{}, + modelprocessor.SetGroupingKey{}, + modelprocessor.SetErrorMessage{}, + modelpb.ProcessBatchFunc(func(ctx context.Context, b *modelpb.Batch) error { + batch = make(modelpb.Batch, len(*b)) + copy(batch, *b) + return nil + }), + } + + var elasticapmResult elasticapm.Result + if err := elasticapmProcessor.HandleStream( + context.TODO(), + &baseEvent, + reader, + math.MaxInt32, // batch size + processor, + &elasticapmResult, + ); err != nil { + return nil, fmt.Errorf("stream error: %w", err) + } + return &batch, nil +} + +// forEachNDJSON loops over ndjson files in testdata. +// The directory is empty by default but the ndjson files can be downloaded from the apm-perf repo. 
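+// Assuming at least one *.ndjson file is present there, the benchmarks can
+// be run from the module root with the standard Go tooling, for example:
+//
+//	go test -run=^$ -bench=BenchmarkNDJSON ./aggregators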
+func forEachNDJSON(b *testing.B, f func(*testing.B, *modelpb.Batch)) { + dirFS := os.DirFS("testdata") + matches, err := fs.Glob(dirFS, "*.ndjson") + if err != nil { + b.Fatal(err) + } + for _, filename := range matches { + b.Run(filename, func(b *testing.B) { + file, err := dirFS.Open(filename) + if err != nil { + b.Fatal(err) + } + defer file.Close() + + batch, err := ndjsonToBatch(bufio.NewReader(file)) + if err != nil { + b.Fatal(err) + } + f(b, batch) + }) + } +} + +func BenchmarkNDJSONSerial(b *testing.B) { + forEachNDJSON(b, func(b *testing.B, batch *modelpb.Batch) { + agg := newTestAggregator(b) + b.Cleanup(func() { + agg.Close(context.TODO()) + }) + cmID := EncodeToCombinedMetricsKeyID(b, "ab01") + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkNDJSONParallel(b *testing.B) { + forEachNDJSON(b, func(b *testing.B, batch *modelpb.Batch) { + agg := newTestAggregator(b) + b.Cleanup(func() { + agg.Close(context.TODO()) + }) + cmID := EncodeToCombinedMetricsKeyID(b, "ab01") + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := agg.AggregateBatch(context.Background(), cmID, batch); err != nil { + b.Fatal(err) + } + } + }) + }) +} diff --git a/copy/apm-aggregation/aggregators/nullable/bool.go b/copy/apm-aggregation/aggregators/nullable/bool.go new file mode 100644 index 00000000000..964aea4502d --- /dev/null +++ b/copy/apm-aggregation/aggregators/nullable/bool.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package nullable + +// Bool represents a bool value which can be set to nil. +// Using uint32 since uint32 is smallest proto type. +type Bool uint32 + +const ( + // Nil represents an unset bool value. + Nil Bool = iota + // False represents a false bool value. + False + // True represents a true bool value. + True +) + +// ParseBoolPtr sets nullable bool from bool pointer. +func (nb *Bool) ParseBoolPtr(b *bool) { + if b == nil { + *nb = Nil + return + } + if *b { + *nb = True + return + } + *nb = False +} + +// ToBoolPtr converts nullable bool to bool pointer. +func (nb *Bool) ToBoolPtr() *bool { + if nb == nil || *nb == Nil { + return nil + } + var b bool + switch *nb { + case False: + b = false + case True: + b = true + } + return &b +} diff --git a/copy/apm-aggregation/aggregators/nullable/doc.go b/copy/apm-aggregation/aggregators/nullable/doc.go new file mode 100644 index 00000000000..e7fd3ac4be7 --- /dev/null +++ b/copy/apm-aggregation/aggregators/nullable/doc.go @@ -0,0 +1,6 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Package nullable contains nullable types. 
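+//
+// A minimal usage sketch for Bool, mirroring the methods in bool.go:
+//
+//	var b nullable.Bool
+//	b.ParseBoolPtr(nil) // b == nullable.Nil
+//	_ = b.ToBoolPtr()   // returns nil while b is Nil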
+package nullable diff --git a/copy/apm-aggregation/aggregators/testdata/.gitkeep b/copy/apm-aggregation/aggregators/testdata/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/copy/apm-aggregation/go.mod b/copy/apm-aggregation/go.mod new file mode 100644 index 00000000000..56906a07616 --- /dev/null +++ b/copy/apm-aggregation/go.mod @@ -0,0 +1,76 @@ +module github.com/elastic/apm-aggregation + +go 1.21 + +require ( + github.com/HdrHistogram/hdrhistogram-go v1.1.2 + github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc + github.com/cespare/xxhash/v2 v2.3.0 + github.com/cockroachdb/pebble v1.1.1 + github.com/elastic/apm-data v1.1.0 + github.com/google/go-cmp v0.6.0 + github.com/stretchr/testify v1.9.0 + go.elastic.co/apm/module/apmotel/v2 v2.6.0 + go.elastic.co/apm/v2 v2.6.0 + go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/metric v1.28.0 + go.opentelemetry.io/otel/sdk v1.28.0 + go.opentelemetry.io/otel/sdk/metric v1.28.0 + go.opentelemetry.io/otel/trace v1.28.0 + go.uber.org/zap v1.27.0 + golang.org/x/sync v0.8.0 + golang.org/x/tools v0.24.0 + google.golang.org/protobuf v1.34.2 +) + +require ( + github.com/DataDog/zstd v1.4.5 // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect + github.com/elastic/go-sysinfo v1.7.1 // indirect + github.com/elastic/go-windows v1.0.1 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.12.0 // indirect + github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + go.elastic.co/apm/module/apmhttp/v2 v2.6.0 // indirect + go.elastic.co/fastjson v1.3.0 // indirect + go.opentelemetry.io/collector/consumer v0.97.0 // indirect + go.opentelemetry.io/collector/pdata v1.4.0 // indirect + go.opentelemetry.io/collector/semconv v0.97.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect + golang.org/x/mod 
v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sys v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/grpc v1.63.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.0 // indirect +) diff --git a/copy/apm-aggregation/go.sum b/copy/apm-aggregation/go.sum new file mode 100644 index 00000000000..c2b746d1212 --- /dev/null +++ b/copy/apm-aggregation/go.sum @@ -0,0 +1,640 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= +github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= 
+github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= +github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/elastic/apm-data v1.1.0 h1:5ahaTQwWb6+OmrXrc1dzfD2wjvTG7yJRaldcsc9dIc4= +github.com/elastic/apm-data v1.1.0/go.mod h1:TLHPd2H8wYlf3FIqriQ018NZzE1FLC6VtsQ6DN0vRmY= +github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= +github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jaegertracing/jaeger v1.56.0 h1:FT7l1sOjkaNbcJ93O9pqBFUCGegYMLlA14EWWfNh5FM= +github.com/jaegertracing/jaeger v1.56.0/go.mod h1:kyckIZXALyDTXWoC3jSsKRuY8XqyWRNJ3RS04upO4UE= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0 h1:vX/BkTfd7/cvydXJ7FmUy5iSviQeNGAgTCoXcLu7/Ww= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.97.0/go.mod h1:yrlbvRlLeny1kFmj4Ac9BSqv/pOr2h7sOIvDE6OMCKk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.97.0 h1:5/19bkuB0GDo+vScgEnNy/TcXAkmyTu7BenfpaD2a/M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.97.0/go.mod h1:CuGIeYSpb4Kmyq4Ez83gHbTjNQGxptjYcC4T5dWkPnM= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0 h1:s8UeNFQmVBCNd4eoz7KDD9rEFhQC0HeUFXz3z9gpAmQ= +go.elastic.co/apm/module/apmhttp/v2 v2.6.0/go.mod h1:D0GLppLuI0Ddwvtl595GUxRgn6Z8L5KaDFVMv2H3GK0= +go.elastic.co/apm/module/apmotel/v2 v2.6.0 h1:5z1/kH2FD/K8Yacl04plBy2YVW6cDPTCMNmM6zG4FJk= +go.elastic.co/apm/module/apmotel/v2 v2.6.0/go.mod h1:jU0+sy3GUHvM70Pf99BXbxgSZyHBFMaBiMWxiNwaxng= +go.elastic.co/apm/v2 v2.6.0 h1:VieBMLQFtXua2YxpYxaSdYGnmmxhLT46gosI5yErJgY= +go.elastic.co/apm/v2 v2.6.0/go.mod h1:33rOXgtHwbgZcDgi6I/GtCSMZQqgxkHC0IQT3gudKvo= +go.elastic.co/fastjson v1.3.0 h1:hJO3OsYIhiqiT4Fgu0ZxAECnKASbwgiS+LMW5oCopKs= +go.elastic.co/fastjson v1.3.0/go.mod h1:K9vDh7O0ODsVKV2B5e2XYLY277QZaCbB3tS1SnARvko= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/collector/consumer v0.97.0 h1:S0BZQtJQxSHT156S8a5rLt3TeWYP8Rq+jn8QEyWQUYk= +go.opentelemetry.io/collector/consumer v0.97.0/go.mod h1:1D06LURiZ/1KA2OnuKNeSn9bvFmJ5ZWe6L8kLu0osSY= +go.opentelemetry.io/collector/pdata v1.4.0 h1:cA6Pr7Z2V7mE+i7FmYpavX7nefzd6H4CICgW0T9aJX0= +go.opentelemetry.io/collector/pdata v1.4.0/go.mod h1:0Ttp4wQinhV5oJTd9MjyvUegmZBO9O0nrlh/+EDLw+Q= +go.opentelemetry.io/collector/semconv v0.97.0 h1:iF3nTfThbiOwz7o5Pocn0dDnDoffd18ijDuf6Mwzi1s= +go.opentelemetry.io/collector/semconv v0.97.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum 
v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/copy/apm-aggregation/proto/aggregation.proto b/copy/apm-aggregation/proto/aggregation.proto new file mode 100644 index 00000000000..6eab24cb192 --- /dev/null +++ b/copy/apm-aggregation/proto/aggregation.proto @@ -0,0 +1,137 @@ +syntax = "proto3"; + +package elastic.apm.v1; + +option go_package = "./aggregationpb"; +option optimize_for = SPEED; + +message CombinedMetrics { + repeated KeyedServiceMetrics service_metrics = 1; + Overflow overflow_services = 2; + bytes overflow_services_estimator = 3; + double events_total = 4; + uint64 youngest_event_timestamp = 5; +} + +message KeyedServiceMetrics { + ServiceAggregationKey key = 1; + ServiceMetrics metrics = 2; +} + +message ServiceAggregationKey { + uint64 timestamp = 1; + string service_name = 2; + string service_environment = 3; + string service_language_name = 4; + string agent_name = 5; + bytes global_labels_str = 6; +} + +message ServiceMetrics { + Overflow overflow_groups = 1; + repeated KeyedTransactionMetrics transaction_metrics = 2; + repeated KeyedServiceTransactionMetrics service_transaction_metrics = 3; + repeated KeyedSpanMetrics span_metrics = 4; +} + +message KeyedTransactionMetrics { + TransactionAggregationKey key = 1; + TransactionMetrics metrics = 2; +} + +message TransactionAggregationKey { + bool trace_root = 1; + + string container_id = 2; + string kubernetes_pod_name = 3; + + string service_version = 4; + string service_node_name = 5; + + string service_runtime_name = 6; + string service_runtime_version = 7; + string service_language_version = 8; + + string host_hostname = 9; + string host_name = 10; + string host_os_platform = 11; + + string event_outcome = 12; + + string transaction_name = 13; + string transaction_type = 14; + string transaction_result = 15; + + uint32 faas_coldstart = 16; + string faas_id = 17; + string faas_name = 18; + string faas_version = 19; + string faas_trigger_type = 20; + + string cloud_provider = 21; + string cloud_region = 22; + string cloud_availability_zone = 23; + string cloud_service_name = 24; + string cloud_account_id = 25; + string cloud_account_name = 26; + string cloud_machine_type = 27; + string cloud_project_id = 28; + string cloud_project_name = 29; +} + +message TransactionMetrics { + HDRHistogram histogram = 1; +} + +message KeyedServiceTransactionMetrics { + ServiceTransactionAggregationKey key = 1; + ServiceTransactionMetrics metrics = 2; +} + +message ServiceTransactionAggregationKey { + string transaction_type = 1; +} + +message 
ServiceTransactionMetrics { + HDRHistogram histogram = 1; + double failure_count = 2; + double success_count = 3; +} + +message KeyedSpanMetrics { + SpanAggregationKey key = 1; + SpanMetrics metrics = 2; +} + +message SpanAggregationKey { + string span_name = 1; + string outcome = 2; + + string target_type = 3; + string target_name = 4; + + string resource = 5; +} + +message SpanMetrics { + double count = 1; + double sum = 2; +} + +message Overflow { + TransactionMetrics overflow_transactions = 1; + ServiceTransactionMetrics overflow_service_transactions = 2; + SpanMetrics overflow_spans = 3; + bytes overflow_transactions_estimator = 4; + bytes overflow_service_transactions_estimator = 5; + bytes overflow_spans_estimator = 6; +} + +message HDRHistogram { + int64 lowest_trackable_value = 1; + int64 highest_trackable_value = 2; + int64 significant_figures = 3; + repeated int64 counts = 4; + repeated int32 buckets = 5; +} + diff --git a/copy/apm-aggregation/proto/buf.yaml b/copy/apm-aggregation/proto/buf.yaml new file mode 100644 index 00000000000..cffcbf9c0b6 --- /dev/null +++ b/copy/apm-aggregation/proto/buf.yaml @@ -0,0 +1,9 @@ +version: v1 +lint: + use: + - DEFAULT + except: + - PACKAGE_DIRECTORY_MATCH +breaking: + use: + - WIRE diff --git a/copy/apm-aggregation/proto/labels.proto b/copy/apm-aggregation/proto/labels.proto new file mode 100644 index 00000000000..293fc9106f9 --- /dev/null +++ b/copy/apm-aggregation/proto/labels.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package elastic.apm.v1; + +option go_package = "./aggregationpb"; +option optimize_for = SPEED; + +message GlobalLabels { + repeated Label labels = 1; + repeated NumericLabel numeric_labels = 2; +} + +message Label { + string key = 1; + string value = 2; + repeated string values = 3; +} + +message NumericLabel { + string key = 1; + double value = 2; + repeated double values = 3; +} diff --git a/copy/apm-aggregation/testdata/sdh_apm_1442_span.json b/copy/apm-aggregation/testdata/sdh_apm_1442_span.json new file mode 100644 index 00000000000..37bb057b5ab --- /dev/null +++ b/copy/apm-aggregation/testdata/sdh_apm_1442_span.json @@ -0,0 +1,1462 @@ +[ + { + "_index": ".ds-traces-apm-default-2024.07.24-000302", + "_id": "tBA65pABjLeaa6AkRvFa", + "_score": 16.245237, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "11b52707342c8d61" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": 
"/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-0b803a442aebfdc5e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-223.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "66a153f2cd5d24fed0ffe99f2111c9ed" + }, + "@timestamp": "2024-07-24T19:20:21.916Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 467, + "otl_collector_ingest_timestamp_razor": 1721848824 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + 
"hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T19:31:37Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 232 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "ce0f38734f44659b", + "type": "app" + }, + "timestamp": { + "us": 1721848821916996 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.24-000302", + "_id": "QQQ05pABjLeaa6AkfH_n", + "_score": 16.245237, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "c016960134ce5eb4" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-0f9b58b1da2d6daba", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-7", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + 
"otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-51.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "66a14cca3643b29e34a179ecabf1a21d" + }, + "@timestamp": "2024-07-24T18:49:53.031Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 472, + "otl_collector_ingest_timestamp_razor": 1721846994 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T19:25:17Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 196 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "40073a0bbc21b30c", + "type": "app" + }, + "timestamp": { + "us": 1721846993031784 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.24-000302", + "_id": "bD0e5pAB1blyeZ3s5Lgi", + "_score": 16.245237, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "c569c3655d9190ba" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops 
-XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-002c366ad028a3989", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-8", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-214-134.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "51d94534d63d7b0b7bb1b19d9a62f5fa" + }, + "@timestamp": "2024-07-24T18:33:45.940Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 473, + "otl_collector_ingest_timestamp_razor": 1721846028 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T19:01:42Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 208 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "21484c045d12484c", + "type": "app" + }, + "timestamp": { + "us": 1721846025940421 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.23-000276", + "_id": "dAL74JABIywE2VmNztG5", + "_score": 16.188189, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "82d5e801ce3cc1b2" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-0d4aa1ec864450b24", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-3", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-76.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "d1f3e6000c3a41b7ee2de63b14217953" + }, + "@timestamp": "2024-07-23T18:15:15.871Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 468, + "otl_collector_ingest_timestamp_razor": 1721758517 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-23T19:05:17Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 190 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "3241510eb23ee6b4", + "type": "app" + }, + "timestamp": { + "us": 1721758515871026 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.23-000276", + "_id": "dQL74JABIywE2VmNztG5", + "_score": 16.188189, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "66c07dd62eb18d60" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-0d4aa1ec864450b24", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-76.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "a65288d22e95d63679b44e1892db8125" + }, + "@timestamp": "2024-07-23T18:15:17.087Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 467, + "otl_collector_ingest_timestamp_razor": 1721758517 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-23T19:05:17Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 191 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "2bb7b08098491fbc", + "type": "app" + }, + "timestamp": { + "us": 1721758517087347 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.23-000276", + "_id": "1y3C4JABGciCfQtXyKfa", + "_score": 16.188189, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "b74aa2f976068f5c" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-7", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "app_costcenter": "1124", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "2cd1a569cf71300d4e095e98dc65fb9d" + }, + "@timestamp": "2024-07-23T17:30:14.756Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 472, + "otl_collector_ingest_timestamp_razor": 1721755816 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-23T18:03:00Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 194 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "21d72fd27ba5eb22", + "type": "app" + }, + "timestamp": { + "us": 1721755814756742 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.23-000276", + "_id": "cuTN4JABZfF62MshPSg2", + "_score": 16.188189, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "3f1b33aa77fd388b" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-0d5d75707ac4b88d4", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-95.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "669fe982e5abe3a0fa8e330fffc8e57d" + }, + "@timestamp": "2024-07-23T17:33:54.534Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 466, + "otl_collector_ingest_timestamp_razor": 1721756036 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-23T18:14:25Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 227 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "175c95d59ee96075", + "type": "app" + }, + "timestamp": { + "us": 1721756034534985 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.24-000297", + "_id": "LGs25ZABZfF62MshtH1t", + "_score": 15.296299, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "2b5a296018ae70e4" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-3", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "66a10a1e83255af8d43c6bef2778ff08" + }, + "@timestamp": "2024-07-24T14:05:24.369Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 468, + "otl_collector_ingest_timestamp_razor": 1721829926 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T14:48:05Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 201 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "61d81656e6046845", + "type": "app" + }, + "timestamp": { + "us": 1721829924369208 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.24-000297", + "_id": "G7M15ZABMRHP9fWBoCtX", + "_score": 15.296299, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "d047c2fcb9a0140a" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-0536d9b9e7893ad58", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-9", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-217.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "6a6bb3a650cf", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "3916b64348ce998e9bc6789997d6f272" + }, + "@timestamp": "2024-07-24T14:06:31.256Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 474, + "otl_collector_ingest_timestamp_razor": 1721829991 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T14:46:55Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 581 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "ed199ddf8fc71793", + "type": "app" + }, + "timestamp": { + "us": 1721829991256399 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.24-000297", + "_id": "t9Q05ZABjLeaa6AkaAVu", + "_score": 15.296299, + "_source": { + "container": { + "id": "527074092" + }, + "parent": { + "id": "fa95d3099af0ed29" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly 
-XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "processor": { + "event": "span" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-013023c31aa742a6e", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "http-nio-8080-exec-5", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "otl_collector_ec2_hostname": "ip-10-157-215-24.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "a7d1d585b175", + "type": "apm-server", + "version": "8.14.1" + }, + "trace": { + "id": "66a10b33434fe95aebb6b314ab4becb8" + }, + "@timestamp": "2024-07-24T14:10:00.684Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 470, + "otl_collector_ingest_timestamp_razor": 1721830201 + }, + "service": { + "node": { + "name": "527074092" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-24T14:45:35Z", + "success_count": 1, + "outcome": "success" + }, + "span": { + "duration": { + "us": 224 + }, + "representative_count": 1, + "subtype": "internal", + "name": "HealthcheckController.getHealthcheck", + "id": "bc44ecb85f7f6bb1", + "type": "app" + }, + "timestamp": { + "us": 1721830200684157 + } + } + } + ] diff --git a/copy/apm-aggregation/testdata/sdh_apm_1442_transaction.json b/copy/apm-aggregation/testdata/sdh_apm_1442_transaction.json new file mode 100644 index 00000000000..dfc9c21883f --- /dev/null +++ b/copy/apm-aggregation/testdata/sdh_apm_1442_transaction.json @@ -0,0 +1,1652 @@ +[ + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "jgQJAJEBxKiB_O3yuCf2", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m 
-XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "net_transport": "ip_tcp", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "db_statement": "ROLE", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "6ad02febb238de76fc84d9cdb6893566" + }, + "@timestamp": "2024-07-29T19:10:11.345Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1179, + "otl_collector_ingest_timestamp_razor": 1722280211 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:48:42Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 344 + }, + "representative_count": 1, + "name": "ROLE", + "id": "7387939031543e97", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "7387939031543e97" + }, + "timestamp": { + "us": 1722280211345123 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "jwQJAJEBxKiB_O3yuCf2", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "net_transport": "ip_tcp", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "db_statement": "ROLE", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "a1d6d0e1833631f81d38d9add5d3f204" + }, + "@timestamp": "2024-07-29T19:10:11.346Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722280211 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:48:42Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 1097 + }, + "representative_count": 1, + "name": "ROLE", + "id": "b8d5b259229df6e6", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "b8d5b259229df6e6" + }, + "timestamp": { + "us": 1722280211346355 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "kAQJAJEBxKiB_O3yuCf2", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m 
-XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "net_transport": "ip_tcp", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "service_ecs_pipeline_version": "1.3.606", + "db_statement": "CLIENT SETNAME lettuce#MasterSlaveTopologyRefresh", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "25cdb88b2e230684e8ecdef118328029" + }, + "@timestamp": "2024-07-29T19:10:11.360Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722280211 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:48:42Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 310 + }, + "representative_count": 1, + "name": "CLIENT", + "id": "26b3ec5bb11c161c", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "26b3ec5bb11c161c" + }, + "timestamp": { + "us": 1722280211360370 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "kQQJAJEBxKiB_O3yuCf2", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "net_transport": "ip_tcp", + "service_ecs_pipeline_version": "1.3.606", + "db_statement": "PING NODES", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "d06f246d37bbe1e56c91c562c4d16734" + }, + "@timestamp": "2024-07-29T19:10:11.361Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722280211 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.230.154" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:48:42Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 282 + }, + "representative_count": 1, + "name": "PING", + "id": "1ff72ec40a0dd7b8", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "1ff72ec40a0dd7b8" + }, + "timestamp": { + "us": 1722280211361770 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "kgQJAJEBxKiB_O3yuCf2", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "net_transport": "ip_tcp", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "db_statement": "PING NODES", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-02e93a5768f12ee8e", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-185.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "2776578adc5d48af630fec0ba55f45b4" + }, + "@timestamp": "2024-07-29T19:10:11.361Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1179, + "otl_collector_ingest_timestamp_razor": 1722280211 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:48:42Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 2060 + }, + "representative_count": 1, + "name": "PING", + "id": "314df690db0ec01a", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "314df690db0ec01a" + }, + "timestamp": { + "us": 1722280211361702 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "R9sBAJEBUNBSa9CU8mxQ", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m 
-XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "net_transport": "ip_tcp", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "db_statement": "ROLE", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-00ba6bea2ff2af61b", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-207.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "bf7294a61e9075cb2e435416b5025f4e" + }, + "@timestamp": "2024-07-29T19:04:11.015Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1179, + "otl_collector_ingest_timestamp_razor": 1722279851 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:40:13Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 697 + }, + "representative_count": 1, + "name": "ROLE", + "id": "b4304c5e2e3660f7", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "b4304c5e2e3660f7" + }, + "timestamp": { + "us": 1722279851015760 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "StsBAJEBUNBSa9CU8mxQ", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "net_transport": "ip_tcp", + "db_statement": "ROLE", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-00ba6bea2ff2af61b", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-207.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "app_costcenter": "1124", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "service_type": "ecs-v3", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "50f9b454608e0c7b7a8ea4436cc2b89a" + }, + "@timestamp": "2024-07-29T19:04:11.020Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722279851 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:40:13Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 1054 + }, + "representative_count": 1, + "name": "ROLE", + "id": "1183675f6806c0a4", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "1183675f6806c0a4" + }, + "timestamp": { + "us": 1722279851020554 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "TdsBAJEBUNBSa9CU8mxQ", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 -XX:InitialCodeCacheSize=56m 
-XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "net_transport": "ip_tcp", + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "db_statement": "CLIENT SETNAME lettuce#MasterSlaveTopologyRefresh", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_ec2_instance": "i-00ba6bea2ff2af61b", + "otl_collector_aws_region": "us-west-2", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_telemetry_type": "traces", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-207.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "app_costcenter": "1124", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "otl_collector_facing": "private", + "aws_ecs_cpu_siblings": "2", + "aws_ecs_limits_memory_mb": "2048" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "14c1f2923b7d2d06d91ae0c46dd3121b" + }, + "@timestamp": "2024-07-29T19:04:11.032Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722279851 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:40:13Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 784 + }, + "representative_count": 1, + "name": "CLIENT", + "id": "1af673c26ffe93ac", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "1af673c26ffe93ac" + }, + "timestamp": { + "us": 1722279851032824 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "TtsBAJEBUNBSa9CU8mxQ", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "net_transport": "ip_tcp", + "db_statement": "PING NODES", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "service_support_email": "Message_A_Llama@vanguard.com", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-00ba6bea2ff2af61b", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-1", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_distro": "opentelemetry-collector-contrib", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "aws_ecs_platformversion": "null", + "otl_loadbalancer": "ALB", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-207.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "app_costcenter": "1124", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "172d31c09cbbec4a1389f6f4f4592f63" + }, + "@timestamp": "2024-07-29T19:04:11.066Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1178, + "otl_collector_ingest_timestamp_razor": 1722279851 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "replica.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.169.252.248" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:40:13Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 677 + }, + "representative_count": 1, + "name": "PING", + "id": "8df0d7a2811cbfb2", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "8df0d7a2811cbfb2" + }, + "timestamp": { + "us": 1722279851066735 + } + } + }, + { + "_index": ".ds-traces-apm-default-2024.07.29-000541", + "_id": "UdsBAJEBUNBSa9CU8mxQ", + "_score": 15.4474, + "_source": { + "container": { + "id": "527074092" + }, + "agent": { + "name": "opentelemetry/java", + "version": "1.15.0" + }, + "process": { + "pid": 393, + "command_line": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java -Dio.opentelemetry.javaagent.slf4j.simpleLogger.log.io.opentelemetry.exporters.otlp.OtlpGrpcMetricExporter=error -javaagent:/target/codeguru-profiler-java-agent.jar=profilingGroupName:mcf-secure-message-consumer-webservice-external,region:us-west-2,heapSummaryEnabled:true -Xms768m -Xmx1024m -XX:MetaspaceSize=128m -XX:CompileThreshold=1000 
-XX:InitialCodeCacheSize=56m -XX:ReservedCodeCacheSize=128m -Dfile.encoding=ISO8859-1 -Djava.awt.headless=true -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70 -XX:SurvivorRatio=6 -XX:TargetSurvivorRatio=90 -verbose:gc -XX:+DisableExplicitGC -XX:+PrintGCDetails -XX:+PrintClassHistogram -XX:+PrintCommandLineFlags -XX:+HeapDumpOnOutOfMemoryError -XX:-UseBiasedLocking -Dsun.net.client.defaultReadTimeout=5000 -Dsun.net.client.defaultConnectTimeout=5000 -Dsun", + "executable": "/usr/lib/jvm/java-11-openjdk-11.0.23.0.9-2.el7_9.x86_64:bin:java" + }, + "source": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "processor": { + "event": "transaction" + }, + "labels": { + "service_ecs_pipeline_version": "1.3.606", + "aws_ecs_startedat": "2024-07-21T10:58:08.975968728Z", + "net_transport": "ip_tcp", + "db_statement": "CLIENT SETNAME lettuce#MasterSlaveTopologyRefresh", + "aws_ecs_cpu_model": "Intel(R)%20Xeon(R)%20Platinum%208259CL%20CPU", + "aws_ecs_task_arn": "arn:aws:ecs:us-west-2:698985542163:task/mcf-cluster-RETSMC-QKCUIVL-external-green/3421039365e3405cb3ec5697729247e4", + "aws_ecs_cpu_threads_per_core": "2", + "aws_ecs_cpu_cores": "1", + "aws_ecs_container_name": "app", + "otl_collector_version": "0.42.0", + "service_language": "java11", + "service_appprefix": "mcf", + "otel_status_code": "OK", + "service_lob": "retail", + "cloud_infrastructure_service": "AWS_ECS_FARGATE", + "service_support_email": "Message_A_Llama@vanguard.com", + "otl_collector_url": "https://otelcollector.gtot.c1.vanguard.com", + "telemetry_auto_version": "1.15.0", + "otl_collector_aws_region": "us-west-2", + "otl_collector_ec2_instance": "i-00ba6bea2ff2af61b", + "aws_ecs_task_family": "mcf-secure-message-consumer-webservice-external-v3-ecs-common-green-ApplicationEcsTask-T52lwD9MH8kr", + "thread_name": "lettuce-epollEventLoop-4-2", + "aws_ecs_limits_cpu": "1", + "aws_ecs_cpu_speed": "2.50GHz", + "service_runtime_environment": "test", + "otl_collector_distro": "opentelemetry-collector-contrib", + "aws_ecs_cpu_cache_size": "36608%20KB", + "otl_collector_aws_vgaccount": "vgi-gto-test", + "aws_ecs_cluster_arn": "arn:aws:ecs:us-west-2:698985542163:cluster/mcf-cluster-RETSMC-QKCUIVL-external-green", + "otl_collector_forwarded_to_version": "0.87.0", + "cloud_account_name": "vgi-retail-test", + "process_runtime_description": "Red Hat, Inc. 
OpenJDK 64-Bit Server VM 11.0.23+9-LTS", + "aws_ecs_bamboo_image": "RETSMC-QKCUIVL-317", + "otl_collector_forwarded_from_name": "GTO OpenTelemetry Collector - Rock", + "service_baseline": "green", + "otl_telemetry_type": "traces", + "otl_collector_forwarded_to_aws_region": "us-west-2", + "otl_collector_bamboo_buildresult": "GTODTR-VKJWCBT-122", + "service_instance": "7e3afdae-9e84-429c-adfe-9e51371362f2", + "otl_collector_platform": "EC2", + "otl_loadbalancer": "ALB", + "aws_ecs_platformversion": "null", + "service_family": "ConversationChannel", + "otl_collector_name": "GTO OpenTelemetry Collector - Rock", + "service_syslevel": "test", + "service_userlocation": "external", + "vg_otel_java_extension_version": "beta-2", + "aws_ecs_capacityprovidername": "FARGATE", + "db_system": "redis", + "otl_collector_ec2_hostname": "ip-10-157-215-207.us-west-2.gtot.c1.vanguard.com", + "aws_ecs_launchtype": "FARGATE", + "app_costcenter": "1124", + "service_type": "ecs-v3", + "otl_collector_forwarded_to_name": "GTO OpenTelemetry Collector - Razor", + "otl_collector_facing": "private", + "aws_ecs_limits_memory_mb": "2048", + "aws_ecs_cpu_siblings": "2" + }, + "cloud": { + "provider": "aws", + "region": "us-west-2" + }, + "observer": { + "hostname": "8565a6c6f980", + "type": "apm-server", + "version": "8.14.3" + }, + "trace": { + "id": "1a66048f1efa1504b0d7467d2494e17b" + }, + "@timestamp": "2024-07-29T19:04:11.066Z", + "data_stream": { + "namespace": "default", + "type": "traces", + "dataset": "apm" + }, + "numeric_labels": { + "vg_otel_java_extension_duration": 0, + "thread_id": 1179, + "otl_collector_ingest_timestamp_razor": 1722279851 + }, + "service": { + "node": { + "name": "527074092" + }, + "framework": { + "name": "io.opentelemetry.lettuce-5.1", + "version": "1.15.0-alpha" + }, + "name": "MCF", + "runtime": { + "name": "OpenJDK Runtime Environment", + "version": "11.0.23+9-LTS" + }, + "language": { + "name": "java" + }, + "version": "RETSMC-QKCUIVL-317" + }, + "host": { + "hostname": "ip-100-107-156-172.us-west-2.compute.internal", + "os": { + "type": "linux", + "platform": "linux", + "full": "Linux 5.10.219-208.866.amzn2.x86_64" + }, + "name": "ip-100-107-156-172.us-west-2.compute.internal", + "architecture": "amd64" + }, + "client": { + "port": 6379, + "domain": "master.mcf-webservice-elasticache-external.q6v1uh.usw2.cache.amazonaws.com", + "ip": "10.157.245.8" + }, + "event": { + "agent_id_status": "missing", + "ingested": "2024-07-29T19:40:13Z", + "success_count": 1, + "outcome": "success" + }, + "transaction": { + "result": "Success", + "duration": { + "us": 931 + }, + "representative_count": 1, + "name": "CLIENT", + "id": "d2798cb03a275075", + "type": "unknown", + "sampled": true + }, + "span": { + "id": "d2798cb03a275075" + }, + "timestamp": { + "us": 1722279851066604 + } + } + } + ] diff --git a/copy/apm-aggregation/tools/go.mod b/copy/apm-aggregation/tools/go.mod new file mode 100644 index 00000000000..9d8054a9df5 --- /dev/null +++ b/copy/apm-aggregation/tools/go.mod @@ -0,0 +1,18 @@ +module github.com/elastic/apm-aggregation/tools + +go 1.19 + +require ( + github.com/elastic/go-licenser v0.4.2 + github.com/planetscale/vtprotobuf v0.6.0 + golang.org/x/tools v0.23.0 + honnef.co/go/tools v0.4.7 +) + +require ( + github.com/BurntSushi/toml v1.2.1 // indirect + golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/sync v0.7.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect +) diff --git 
a/copy/apm-aggregation/tools/go.sum b/copy/apm-aggregation/tools/go.sum
new file mode 100644
index 00000000000..ec5f68f2387
--- /dev/null
+++ b/copy/apm-aggregation/tools/go.sum
@@ -0,0 +1,23 @@
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/elastic/go-licenser v0.4.2 h1:bPbGm8bUd8rxzSswFOqvQh1dAkKGkgAmrPxbUi+Y9+A=
+github.com/elastic/go-licenser v0.4.2/go.mod h1:W8eH6FaZDR8fQGm+7FnVa7MxI1b/6dAqxz+zPB8nm5c=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA=
+github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE=
+golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs=
+honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
diff --git a/copy/apm-aggregation/tools/install-protoc.sh b/copy/apm-aggregation/tools/install-protoc.sh
new file mode 100755
index 00000000000..eda979b085f
--- /dev/null
+++ b/copy/apm-aggregation/tools/install-protoc.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+set -eo pipefail
+
+ARCH=$(uname -m)
+PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')
+UNAME_PLATFORM=${PLATFORM}
+BINARY=protoc
+if [[ ${PLATFORM} == "darwin" ]]; then
+    PLATFORM=osx
+    ARCH=universal_binary
+elif [[ ${PLATFORM} == "linux" ]]; then
+    case ${ARCH} in
+    "arm64")
+        ARCH=aarch_64
+        ;;
+    "x86_64")
+        ARCH=x86_64
+        ;;
+    "aarch64") ARCH=aarch_64
+        ;;
+    *)
+        echo "-> Architecture ${ARCH} not supported"; exit 1;
+        ;;
+esac
+fi
+
+PROTOBUF_VERSION="v22.1"
+PROTOBUF_VERSION_NO_V=$(echo ${PROTOBUF_VERSION}|tr -d 'v')
+
+PROTOC_PATH=build/${UNAME_PLATFORM}/${BINARY}
+mkdir -p ${PROTOC_PATH}
+
+curl -sL -o ${PROTOC_PATH}/${BINARY}.zip https://github.com/protocolbuffers/protobuf/releases/download/${PROTOBUF_VERSION}/${BINARY}-${PROTOBUF_VERSION_NO_V}-${PLATFORM}-${ARCH}.zip
+
+BIN_PATH=${PROTOC_PATH}/bin/${BINARY}
+cd ${PROTOC_PATH} && unzip ${BINARY}.zip
+cd -
+chmod +x ${BIN_PATH}
diff --git a/copy/apm-aggregation/tools/tools.go b/copy/apm-aggregation/tools/tools.go
new file mode 100644
index 00000000000..1e8c0e634e4
--- /dev/null
+++ 
b/copy/apm-aggregation/tools/tools.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build tools +// +build tools + +package main + +import ( + _ "github.com/planetscale/vtprotobuf/generator" // go.mod + _ "golang.org/x/tools/cmd/goimports" // go.mod + _ "honnef.co/go/tools/cmd/staticcheck" // go.mod + + _ "github.com/elastic/go-licenser" // go.mod +) diff --git a/go.mod b/go.mod index 473a6ea7ad6..93f682a311e 100644 --- a/go.mod +++ b/go.mod @@ -175,3 +175,5 @@ replace ( // command line flags and conflicting with command line flags added by libbeat. github.com/golang/glog => ./internal/glog ) + +replace github.com/elastic/apm-aggregation => ./copy/apm-aggregation
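
The final hunk redirects every import of the github.com/elastic/apm-aggregation module to the vendored tree under ./copy/apm-aggregation, so builds stop resolving that module through the module proxy. A minimal sketch of the same pattern, assuming a hypothetical consuming module example.com/app (that module path and the v0.0.0 requirement version are illustrative, not taken from this patch):

    module example.com/app

    go 1.19

    // The requirement version is ignored once the filesystem replacement
    // below takes effect; the replacement directory only needs to contain
    // a go.mod file of its own.
    require github.com/elastic/apm-aggregation v0.0.0

    replace github.com/elastic/apm-aggregation => ./copy/apm-aggregation

With the replacement in place, go build resolves imports such as github.com/elastic/apm-aggregation/aggregators against the local directory, which is what lets the pool-removal changes in the vendored copy take effect without publishing a new module version.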