diff --git a/.chloggen/stanley.liu_top-level-change-connector.yaml b/.chloggen/stanley.liu_top-level-change-connector.yaml new file mode 100644 index 000000000000..b49d9b7ff328 --- /dev/null +++ b/.chloggen/stanley.liu_top-level-change-connector.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: connector/datadog + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The Datadog connector now has a config option to identify top-level spans by span kind. This new logic can be enabled by setting `traces::compute_top_level_by_span_kind` to true in the Datadog connector config. Default is false. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32005] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + `traces::compute_top_level_by_span_kind` needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used. + With this new logic, root spans and spans with a server or consumer `span.kind` will be marked as top-level. Additionally, spans with a client or producer `span.kind` will have stats computed. + Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] \ No newline at end of file diff --git a/.chloggen/stanley.liu_top-level-change.yaml b/.chloggen/stanley.liu_top-level-change.yaml new file mode 100644 index 000000000000..b780ecbb502a --- /dev/null +++ b/.chloggen/stanley.liu_top-level-change.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: exporter/datadog + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: The Datadog exporter now has a config option to identify top-level spans by span kind. This new logic can be enabled by setting `traces::compute_top_level_by_span_kind` to true in the Datadog exporter config. Default is false. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32005] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: | + `traces::compute_top_level_by_span_kind` needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used. + With this new logic, root spans and spans with a server or consumer `span.kind` will be marked as top-level. Additionally, spans with a client or producer `span.kind` will have stats computed. + Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] \ No newline at end of file diff --git a/connector/datadogconnector/config.go b/connector/datadogconnector/config.go index 1382f09c5740..bebece6aeb8c 100644 --- a/connector/datadogconnector/config.go +++ b/connector/datadogconnector/config.go @@ -42,8 +42,17 @@ type TracesConfig struct { // If set to true, enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer). // If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. // NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. + // If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. + // If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and disable `compute_top_level_by_span_kind`. ComputeStatsBySpanKind bool `mapstructure:"compute_stats_by_span_kind"` + // If set to true, root spans and spans with a server or consumer `span.kind` will be marked as top-level. + // Additionally, spans with a client or producer `span.kind` will have stats computed. + // Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. + // ComputeTopLevelBySpanKind needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used. + // The default value is `false`. + ComputeTopLevelBySpanKind bool `mapstructure:"compute_top_level_by_span_kind"` + // If set to true, enables aggregation of peer related tags (e.g., `peer.service`, `db.instance`, etc.) in the datadog connector. // If disabled, aggregated trace stats will not include these tags as dimensions on trace metrics. // For the best experience with peer tags, Datadog also recommends enabling `compute_stats_by_span_kind`. 
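As a quick illustration of the interaction described in the new comments above (a hedged sketch, not an excerpt from the example configs in this change): a user sending OTel traces who wants no span-kind-based behavior would disable both options together in the connector's `traces` block.

```yaml
connectors:
  datadog/connector:
    traces:
      # Per the comments on ComputeStatsBySpanKind and ComputeTopLevelBySpanKind,
      # both options are disabled here to avoid any span.kind-based stats or
      # top-level detection. compute_top_level_by_span_kind defaults to false.
      compute_stats_by_span_kind: false
      compute_top_level_by_span_kind: false
```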
diff --git a/connector/datadogconnector/connector.go b/connector/datadogconnector/connector.go index 22f412e8856a..03e83667d97d 100644 --- a/connector/datadogconnector/connector.go +++ b/connector/datadogconnector/connector.go @@ -88,7 +88,7 @@ func newTraceToMetricConnector(set component.TelemetrySettings, cfg component.Co ctx := context.Background() return &traceToMetricConnector{ logger: set.Logger, - agent: datadog.NewAgentWithConfig(ctx, getTraceAgentCfg(cfg.(*Config).Traces, attributesTranslator), in, metricsClient, timingReporter), + agent: datadog.NewAgentWithConfig(ctx, getTraceAgentCfg(set.Logger, cfg.(*Config).Traces, attributesTranslator), in, metricsClient, timingReporter), translator: trans, in: in, metricsConsumer: metricsConsumer, @@ -98,7 +98,7 @@ func newTraceToMetricConnector(set component.TelemetrySettings, cfg component.Co }, nil } -func getTraceAgentCfg(cfg TracesConfig, attributesTranslator *attributes.Translator) *traceconfig.AgentConfig { +func getTraceAgentCfg(logger *zap.Logger, cfg TracesConfig, attributesTranslator *attributes.Translator) *traceconfig.AgentConfig { acfg := traceconfig.New() acfg.OTLPReceiver.AttributesTranslator = attributesTranslator acfg.OTLPReceiver.SpanNameRemappings = cfg.SpanNameRemappings @@ -114,6 +114,10 @@ func getTraceAgentCfg(cfg TracesConfig, attributesTranslator *attributes.Transla if v := cfg.TraceBuffer; v > 0 { acfg.TraceBuffer = v } + if cfg.ComputeTopLevelBySpanKind { + logger.Info("traces::compute_top_level_by_span_kind needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used") + acfg.Features["enable_otlp_compute_top_level_by_span_kind"] = struct{}{} + } return acfg } diff --git a/connector/datadogconnector/examples/config.yaml b/connector/datadogconnector/examples/config.yaml index 5d6f03374b67..d01c9b65ce25 100644 --- a/connector/datadogconnector/examples/config.yaml +++ b/connector/datadogconnector/examples/config.yaml @@ -41,7 +41,16 @@ connectors: ## If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. ## NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. # + ## If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. + ## If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and disable `compute_top_level_by_span_kind`. + # compute_stats_by_span_kind: true + ## @param compute_top_level_by_span_kind - enables top-level span identification based on `span.kind` - optional + ## If set to true, root spans and spans with a server or consumer `span.kind` will be marked as top-level. + ## Additionally, spans with a client or producer `span.kind` will have stats computed. + ## Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. + # + compute_top_level_by_span_kind: false ## @param peer_tags_aggregation - enables aggregation of peer related tags in Datadog exporter - optional ## If set to true, enables aggregation of peer related tags (e.g., `peer.service`, `db.instance`, etc.) in Datadog exporter. ## If disabled, aggregated trace stats will not include these tags as dimensions on trace metrics. 
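The changelog entries and the reminder logged in `getTraceAgentCfg` both stress that `compute_top_level_by_span_kind` has to be enabled on the connector and the exporter when the two are chained. The sketch below shows what that could look like end to end; it assumes an OTLP receiver, and the endpoint and API key values are placeholders rather than values taken from this change:

```yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: "localhost:4317"

connectors:
  datadog/connector:
    traces:
      # must match the exporter-side setting below
      compute_top_level_by_span_kind: true

exporters:
  datadog:
    api:
      key: ${env:DD_API_KEY}  # placeholder
    traces:
      # must match the connector-side setting above
      compute_top_level_by_span_kind: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [datadog/connector]
    traces/2:
      receivers: [datadog/connector]
      exporters: [datadog]
    metrics:
      receivers: [datadog/connector]
      exporters: [datadog]
```

The Info log added in connector.go fires whenever the connector-side option is on, as a reminder to set the exporter-side option as well.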
diff --git a/exporter/datadogexporter/config.go b/exporter/datadogexporter/config.go index b1565de12df5..631af88b4d67 100644 --- a/exporter/datadogexporter/config.go +++ b/exporter/datadogexporter/config.go @@ -273,8 +273,17 @@ type TracesConfig struct { // If set to true, enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer). // If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. // NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. + // If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. + // If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and disable `compute_top_level_by_span_kind`. ComputeStatsBySpanKind bool `mapstructure:"compute_stats_by_span_kind"` + // If set to true, root spans and spans with a server or consumer `span.kind` will be marked as top-level. + // Additionally, spans with a client or producer `span.kind` will have stats computed. + // Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. + // ComputeTopLevelBySpanKind needs to be enabled in both the Datadog connector and Datadog exporter configs if both components are being used. + // The default value is `false`. + ComputeTopLevelBySpanKind bool `mapstructure:"compute_top_level_by_span_kind"` + // If set to true, enables `peer.service` aggregation in the exporter. If disabled, aggregated trace stats will not include `peer.service` as a dimension. // For the best experience with `peer.service`, it is recommended to also enable `compute_stats_by_span_kind`. // If enabling both causes the datadog exporter to consume too many resources, try disabling `compute_stats_by_span_kind` first. diff --git a/exporter/datadogexporter/examples/collector.yaml b/exporter/datadogexporter/examples/collector.yaml index 52c217eb8cee..c059969120bb 100644 --- a/exporter/datadogexporter/examples/collector.yaml +++ b/exporter/datadogexporter/examples/collector.yaml @@ -357,8 +357,18 @@ exporters: ## If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. ## NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. # + ## If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. + ## If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and disable `compute_top_level_by_span_kind`. + # # compute_stats_by_span_kind: true + ## @param compute_top_level_by_span_kind - enables top-level span identification based on `span.kind` - optional + ## If set to true, root spans and spans with a server or consumer `span.kind` will be marked as top-level. + ## Additionally, spans with a client or producer `span.kind` will have stats computed. + ## Enabling this config option may increase the number of spans that generate trace metrics, and may change which spans appear as top-level in Datadog. 
+ # + # compute_top_level_by_span_kind: false + ## @param peer_service_aggregation - enables `peer.service` aggregation on trace stats in Datadog exporter - optional ## If set to true, enables `peer.service` aggregation in the exporter. If disabled, aggregated trace stats will not include `peer.service` as a dimension. ## For the best experience with `peer.service`, it is recommended to also enable `compute_stats_by_span_kind`. diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index ab51f52f7e01..fc85bd4ed38d 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -44,6 +44,124 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor" ) +const collectorConfig = ` +receivers: + otlp: + protocols: + http: + endpoint: "localhost:4318" + grpc: + endpoint: "localhost:4317" + +processors: + batch: + send_batch_size: 10 + timeout: 5s + tail_sampling: + decision_wait: 1s + policies: [ + { + name: sample_flag, + type: boolean_attribute, + boolean_attribute: { key: sampled, value: true }, + } + ] + +connectors: + datadog/connector: + traces: + compute_stats_by_span_kind: true + peer_tags_aggregation: true + peer_tags: ["extra_peer_tag"] + +exporters: + debug: + verbosity: detailed + datadog: + api: + key: "key" + tls: + insecure_skip_verify: true + host_metadata: + enabled: false + traces: + endpoint: %q + trace_buffer: 10 + metrics: + endpoint: %q + +service: + telemetry: + metrics: + level: none + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/connector] + traces/2: # this pipeline uses sampling + receivers: [datadog/connector] + processors: [tail_sampling, batch] + exporters: [datadog, debug] + metrics: + receivers: [datadog/connector] + processors: [batch] + exporters: [datadog, debug]` + +const collectorConfigComputeTopLevelBySpanKind = ` +receivers: + otlp: + protocols: + http: + endpoint: "localhost:4318" + grpc: + endpoint: "localhost:4317" + +processors: + batch: + send_batch_size: 10 + timeout: 5s + +connectors: + datadog/connector: + traces: + compute_top_level_by_span_kind: true + +exporters: + debug: + verbosity: detailed + datadog: + api: + key: "key" + tls: + insecure_skip_verify: true + host_metadata: + enabled: false + traces: + endpoint: %q + trace_buffer: 10 + compute_top_level_by_span_kind: true + metrics: + endpoint: %q + +service: + telemetry: + metrics: + level: none + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog/connector] + traces/2: + receivers: [datadog/connector] + processors: [batch] + exporters: [datadog, debug] + metrics: + receivers: [datadog/connector] + processors: [batch] + exporters: [datadog, debug]` + func TestIntegration(t *testing.T) { // 1. Set up mock Datadog server // See also https://github.com/DataDog/datadog-agent/blob/49c16e0d4deab396626238fa1d572b684475a53f/cmd/trace-agent/test/backend.go @@ -54,7 +172,7 @@ func TestIntegration(t *testing.T) { // 2. 
Start in-process collector factories := getIntegrationTestComponents(t) - app, confFilePath := getIntegrationTestCollector(t, server.URL, factories) + app, confFilePath := getIntegrationTestCollector(t, collectorConfig, server.URL, factories) go func() { assert.NoError(t, app.Run(context.Background())) }() @@ -143,70 +261,8 @@ func getIntegrationTestComponents(t *testing.T) otelcol.Factories { return factories } -func getIntegrationTestCollector(t *testing.T, url string, factories otelcol.Factories) (*otelcol.Collector, string) { - cfg := fmt.Sprintf(` -receivers: - otlp: - protocols: - http: - endpoint: "localhost:4318" - grpc: - endpoint: "localhost:4317" - -processors: - batch: - send_batch_size: 10 - timeout: 5s - tail_sampling: - decision_wait: 1s - policies: [ - { - name: sample_flag, - type: boolean_attribute, - boolean_attribute: { key: sampled, value: true }, - } - ] - -connectors: - datadog/connector: - traces: - compute_stats_by_span_kind: true - peer_tags_aggregation: true - peer_tags: ["extra_peer_tag"] - -exporters: - debug: - verbosity: detailed - datadog: - api: - key: "key" - tls: - insecure_skip_verify: true - host_metadata: - enabled: false - traces: - endpoint: %q - trace_buffer: 10 - metrics: - endpoint: %q - -service: - telemetry: - metrics: - level: none - pipelines: - traces: - receivers: [otlp] - processors: [batch] - exporters: [datadog/connector] - traces/2: # this pipeline uses sampling - receivers: [datadog/connector] - processors: [tail_sampling, batch] - exporters: [datadog, debug] - metrics: - receivers: [datadog/connector] - processors: [batch] - exporters: [datadog, debug]`, url, url) +func getIntegrationTestCollector(t *testing.T, cfgStr string, url string, factories otelcol.Factories) (*otelcol.Collector, string) { + cfg := fmt.Sprintf(cfgStr, url, url) confFile, err := os.CreateTemp(os.TempDir(), "conf-") require.NoError(t, err) @@ -304,3 +360,165 @@ func getGzipReader(t *testing.T, reqBytes []byte) io.Reader { require.NoError(t, err) return reader } + +func TestIntegrationComputeTopLevelBySpanKind(t *testing.T) { + // 1. Set up mock Datadog server + // See also https://github.com/DataDog/datadog-agent/blob/49c16e0d4deab396626238fa1d572b684475a53f/cmd/trace-agent/test/backend.go + apmstatsRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.APMStatsEndpoint, ReqChan: make(chan []byte)} + tracesRec := &testutil.HTTPRequestRecorderWithChan{Pattern: testutil.TraceEndpoint, ReqChan: make(chan []byte)} + server := testutil.DatadogServerMock(apmstatsRec.HandlerFunc, tracesRec.HandlerFunc) + defer server.Close() + + // 2. Start in-process collector + factories := getIntegrationTestComponents(t) + app, confFilePath := getIntegrationTestCollector(t, collectorConfigComputeTopLevelBySpanKind, server.URL, factories) + go func() { + assert.NoError(t, app.Run(context.Background())) + }() + defer app.Shutdown() + defer os.Remove(confFilePath) + waitForReadiness(app) + + // 3. Generate and send traces + sendTracesComputeTopLevelBySpanKind(t) + + // 4. 
Validate traces and APM stats from the mock server + var spans []*pb.Span + var stats []*pb.ClientGroupedStats + var serverSpans, clientSpans, consumerSpans, producerSpans, internalSpans int + + // 10 total spans + APM stats on 8 spans are sent to datadog exporter + for len(spans) < 10 || len(stats) < 8 { + select { + case tracesBytes := <-tracesRec.ReqChan: + gz := getGzipReader(t, tracesBytes) + slurp, err := io.ReadAll(gz) + require.NoError(t, err) + var traces pb.AgentPayload + require.NoError(t, proto.Unmarshal(slurp, &traces)) + for _, tps := range traces.TracerPayloads { + for _, chunks := range tps.Chunks { + spans = append(spans, chunks.Spans...) + } + } + + case apmstatsBytes := <-apmstatsRec.ReqChan: + gz := getGzipReader(t, apmstatsBytes) + var spl pb.StatsPayload + require.NoError(t, msgp.Decode(gz, &spl)) + for _, csps := range spl.Stats { + assert.Equal(t, "datadogexporter-otelcol-tests", spl.AgentVersion) + for _, csbs := range csps.Stats { + stats = append(stats, csbs.Stats...) + for _, stat := range csbs.Stats { + switch stat.SpanKind { + case apitrace.SpanKindInternal.String(): + internalSpans++ + case apitrace.SpanKindServer.String(): + assert.Equal(t, uint64(1), stat.Hits) + assert.Equal(t, uint64(1), stat.TopLevelHits) + serverSpans++ + case apitrace.SpanKindClient.String(): + assert.Equal(t, uint64(1), stat.Hits) + assert.Equal(t, uint64(0), stat.TopLevelHits) + clientSpans++ + case apitrace.SpanKindProducer.String(): + assert.Equal(t, uint64(1), stat.Hits) + assert.Equal(t, uint64(0), stat.TopLevelHits) + producerSpans++ + case apitrace.SpanKindConsumer.String(): + assert.Equal(t, uint64(1), stat.Hits) + assert.Equal(t, uint64(1), stat.TopLevelHits) + consumerSpans++ + } + assert.True(t, strings.HasPrefix(stat.Resource, "TestSpan")) + } + } + } + } + } + + // Verify we don't receive more than the expected numbers + assert.Equal(t, 2, serverSpans) + assert.Equal(t, 2, clientSpans) + assert.Equal(t, 2, consumerSpans) + assert.Equal(t, 2, producerSpans) + assert.Equal(t, 0, internalSpans) + assert.Len(t, spans, 10) + assert.Len(t, stats, 8) + + for _, span := range spans { + switch { + case span.Meta["span.kind"] == apitrace.SpanKindInternal.String(): + assert.EqualValues(t, 0, span.Metrics["_top_level"]) + assert.EqualValues(t, 0, span.Metrics["_dd.measured"]) + case span.Meta["span.kind"] == apitrace.SpanKindServer.String(): + assert.EqualValues(t, 1, span.Metrics["_top_level"]) + assert.EqualValues(t, 0, span.Metrics["_dd.measured"]) + case span.Meta["span.kind"] == apitrace.SpanKindClient.String(): + assert.EqualValues(t, 0, span.Metrics["_top_level"]) + assert.EqualValues(t, 1, span.Metrics["_dd.measured"]) + case span.Meta["span.kind"] == apitrace.SpanKindProducer.String(): + assert.EqualValues(t, 0, span.Metrics["_top_level"]) + assert.EqualValues(t, 1, span.Metrics["_dd.measured"]) + case span.Meta["span.kind"] == apitrace.SpanKindConsumer.String(): + assert.EqualValues(t, 1, span.Metrics["_top_level"]) + assert.EqualValues(t, 0, span.Metrics["_dd.measured"]) + } + } +} + +func sendTracesComputeTopLevelBySpanKind(t *testing.T) { + ctx := context.Background() + + // Set up OTel-Go SDK and exporter + traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure()) + require.NoError(t, err) + bsp := sdktrace.NewBatchSpanProcessor(traceExporter) + r1, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "aaaa"))) + r2, _ := resource.New(ctx, resource.WithAttributes(attribute.String("k8s.node.name", "bbbb"))) + tracerProvider := 
sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithSpanProcessor(bsp), + sdktrace.WithResource(r1), + ) + tracerProvider2 := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithSpanProcessor(bsp), + sdktrace.WithResource(r2), + ) + otel.SetTracerProvider(tracerProvider) + defer func() { + require.NoError(t, tracerProvider.Shutdown(ctx)) + require.NoError(t, tracerProvider2.Shutdown(ctx)) + }() + + tracer := otel.Tracer("test-tracer") + for i := 0; i < 10; i++ { + var spanKind apitrace.SpanKind + switch i { + case 0, 1: + spanKind = apitrace.SpanKindConsumer + case 2, 3: + spanKind = apitrace.SpanKindServer + case 4, 5: + spanKind = apitrace.SpanKindClient + case 6, 7: + spanKind = apitrace.SpanKindProducer + case 8, 9: + spanKind = apitrace.SpanKindInternal + } + var span apitrace.Span + ctx, span = tracer.Start(ctx, fmt.Sprintf("TestSpan%d", i), apitrace.WithSpanKind(spanKind)) + + if i == 3 { + // Send some traces from a different resource + // This verifies that stats from different hosts don't accidentally create extraneous empty stats buckets + otel.SetTracerProvider(tracerProvider2) + tracer = otel.Tracer("test-tracer2") + } + + span.End() + } + time.Sleep(1 * time.Second) +} diff --git a/exporter/datadogexporter/traces_exporter.go b/exporter/datadogexporter/traces_exporter.go index 8b0e4dc4cb91..640e22b0b1be 100644 --- a/exporter/datadogexporter/traces_exporter.go +++ b/exporter/datadogexporter/traces_exporter.go @@ -219,6 +219,9 @@ func newTraceAgentConfig(ctx context.Context, params exporter.CreateSettings, cf if addr := cfg.Traces.Endpoint; addr != "" { acfg.Endpoints[0].Host = addr } + if cfg.Traces.ComputeTopLevelBySpanKind { + acfg.Features["enable_otlp_compute_top_level_by_span_kind"] = struct{}{} + } tracelog.SetLogger(&zaplogger{params.Logger}) //TODO: This shouldn't be a singleton return acfg, nil }
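The feature flag set in `newTraceAgentConfig` above also applies when traces flow straight from a receiver to the Datadog exporter with no connector in between. A minimal sketch of that exporter-only case, again with placeholder endpoint and API key values:

```yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: "localhost:4317"

exporters:
  datadog:
    api:
      key: ${env:DD_API_KEY}  # placeholder
    traces:
      compute_top_level_by_span_kind: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [datadog]
```

In this setup only the exporter-side option matters, since there is no connector config to keep in sync.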