diff --git a/.github/workflows/appsec.yml b/.github/workflows/appsec.yml
index f913f17fdf..33bed353f0 100644
--- a/.github/workflows/appsec.yml
+++ b/.github/workflows/appsec.yml
@@ -40,6 +40,9 @@ concurrency:
# Automatically cancel previous runs if a new one is triggered to conserve resources.
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
+permissions:
+ contents: read
+
jobs:
# Prepare the cache of Go modules to share it will the other jobs.
# This maximizes cache hits and minimizes the time spent downloading Go modules.
@@ -95,7 +98,7 @@ jobs:
strategy:
matrix:
runs-on: [ macos-12, macos-14 ] # oldest and newest macos runners available - macos-14 mainly is here to cover the fact it is an ARM machine
- go-version: [ "1.22", "1.21" ]
+ go-version: [ "1.23", "1.22" ]
fail-fast: true # saving some CI time - macos runners too long to get
steps:
- uses: actions/checkout@v4
@@ -187,7 +190,7 @@ jobs:
needs: go-mod-caching
strategy:
matrix:
- go-version: [ "1.22", "1.21" ]
+ go-version: [ "1.23", "1.22" ]
distribution: [ bookworm, bullseye, alpine ]
platform: [ linux/amd64, linux/arm64 ]
diff --git a/.github/workflows/datadog-static-analysis.yml b/.github/workflows/datadog-static-analysis.yml
index 8094914c28..9a00adaad1 100644
--- a/.github/workflows/datadog-static-analysis.yml
+++ b/.github/workflows/datadog-static-analysis.yml
@@ -2,6 +2,10 @@ on: [push]
name: Datadog Static Analysis
+permissions:
+ contents: read
+ pull-requests: write
+
jobs:
static-analysis:
runs-on: ubuntu-latest
diff --git a/.github/workflows/ecosystems-label-issue copy.yml b/.github/workflows/ecosystems-label-issue.yml
similarity index 90%
rename from .github/workflows/ecosystems-label-issue copy.yml
rename to .github/workflows/ecosystems-label-issue.yml
index f63226c003..29853e45bc 100644
--- a/.github/workflows/ecosystems-label-issue copy.yml
+++ b/.github/workflows/ecosystems-label-issue.yml
@@ -5,6 +5,9 @@ on:
- reopened
- opened
- edited
+permissions:
+ contents: read
+ issues: write
jobs:
label_issues:
if: contains(github.event.issue.title, 'contrib')
diff --git a/.github/workflows/ecosystems-label-pr.yml b/.github/workflows/ecosystems-label-pr.yml
index 36f35b5422..4cadafd3e7 100644
--- a/.github/workflows/ecosystems-label-pr.yml
+++ b/.github/workflows/ecosystems-label-pr.yml
@@ -7,6 +7,9 @@ on:
- opened
- reopened
- edited
+permissions:
+ contents: read
+ pull-requests: write
jobs:
label_issues:
runs-on: ubuntu-latest
diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml
index 152b5b50b8..eaf5ed78d9 100644
--- a/.github/workflows/govulncheck.yml
+++ b/.github/workflows/govulncheck.yml
@@ -14,6 +14,9 @@ on:
- cron: '00 00 * * *'
workflow_dispatch:
+permissions:
+ contents: read
+
jobs:
govulncheck-tests:
runs-on: ubuntu-latest
diff --git a/.github/workflows/main-branch-tests.yml b/.github/workflows/main-branch-tests.yml
index 96e4c7ea25..7d738ccd9a 100644
--- a/.github/workflows/main-branch-tests.yml
+++ b/.github/workflows/main-branch-tests.yml
@@ -22,7 +22,7 @@ jobs:
unit-integration-tests:
strategy:
matrix:
- go-version: [ "1.21", "1.22" ]
+ go-version: [ "1.22", "1.23" ]
fail-fast: false
uses: ./.github/workflows/unit-integration-tests.yml
with:
@@ -33,7 +33,7 @@ jobs:
strategy:
matrix:
runs-on: [ macos-latest, windows-latest, ubuntu-latest ]
- go-version: [ "1.21", "1.22" ]
+ go-version: [ "1.22", "1.23" ]
fail-fast: false
uses: ./.github/workflows/multios-unit-tests.yml
with:
diff --git a/.github/workflows/multios-unit-tests.yml b/.github/workflows/multios-unit-tests.yml
index 3ca8900602..1cdd9191b6 100644
--- a/.github/workflows/multios-unit-tests.yml
+++ b/.github/workflows/multios-unit-tests.yml
@@ -29,6 +29,9 @@ on:
env:
DD_APPSEC_WAF_TIMEOUT: 1m # Increase time WAF time budget to reduce CI flakiness
+permissions:
+ contents: read
+
jobs:
test-multi-os:
runs-on: "${{ inputs.runs-on }}"
diff --git a/.github/workflows/orchestrion.yml b/.github/workflows/orchestrion.yml
index f7578e4ecd..0603c8b139 100644
--- a/.github/workflows/orchestrion.yml
+++ b/.github/workflows/orchestrion.yml
@@ -17,7 +17,7 @@ concurrency:
jobs:
test:
name: 'Run Tests'
- uses: DataDog/orchestrion/.github/workflows/workflow_call.yml@eliott.bouhana/APPSEC-53773 # we don't want to pin our own action
+ uses: DataDog/orchestrion/.github/workflows/workflow_call.yml@main # we don't want to pin our own action
with:
dd-trace-go-ref: ${{ github.sha }}
runs-on: ubuntu-latest-16-cores
diff --git a/.github/workflows/parametric-tests.yml b/.github/workflows/parametric-tests.yml
index 5a91d0e3ce..a25c01a2c3 100644
--- a/.github/workflows/parametric-tests.yml
+++ b/.github/workflows/parametric-tests.yml
@@ -21,6 +21,9 @@ on:
schedule:
- cron: '00 04 * * 2-6'
+permissions:
+ contents: read
+
jobs:
parametric-tests:
if: github.event_name != 'pull_request' || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'DataDog/dd-trace-go')
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
index 128967216d..9513b2e328 100644
--- a/.github/workflows/pull-request.yml
+++ b/.github/workflows/pull-request.yml
@@ -18,6 +18,6 @@ jobs:
name: PR Unit and Integration Tests
uses: ./.github/workflows/unit-integration-tests.yml
with:
- go-version: "1.21"
+ go-version: "1.22"
ref: ${{ github.ref }}
secrets: inherit
diff --git a/.github/workflows/smoke-tests.yml b/.github/workflows/smoke-tests.yml
index 9bc2c62905..cee00b0d34 100644
--- a/.github/workflows/smoke-tests.yml
+++ b/.github/workflows/smoke-tests.yml
@@ -27,6 +27,9 @@ on:
env:
TEST_RESULTS: /tmp/test-results # path to where test results will be saved
+permissions:
+ contents: read
+
jobs:
go-get-u:
# Run go get -u to upgrade dd-trace-go dependencies to their
@@ -70,13 +73,20 @@ jobs:
# Run go mod tidy to ensure that all go.mod and go.sum files are up-to-date.
name: 'go mod tidy smoke test'
runs-on: ubuntu-latest
+ env:
+ # Users may build our library with GOTOOLCHAIN=local. If they do, and our
+ # go.mod file specifies a newer Go version than their local toolchain, their
+ # build will break. Run our tests with GOTOOLCHAIN=local to ensure that
+ # our library builds with all of the Go versions we claim to support,
+ # without having to download a newer one.
+ GOTOOLCHAIN: local
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref || github.ref }}
- uses: actions/setup-go@v3
with:
- go-version: "1.21"
+ go-version: "1.22"
cache: true
- name: go mod tidy
run: |-
@@ -99,7 +109,7 @@ jobs:
matrix:
# TODO: cross-compilation from/to different hardware architectures once
# github provides native ARM runners.
- go: [ "1.21", "1.22", "1.23-rc" ]
+ go: [ "1.22", "1.23" ]
build-env: [ alpine, bookworm, bullseye ]
build-with-cgo: [ 0, 1 ]
deployment-env: [ alpine, debian11, debian12, al2, al2023, busybox, scratch ]
@@ -171,7 +181,7 @@ jobs:
uses: docker/build-push-action@v5
with:
context: .
- file: ./internal/apps/setup-smoke-test/Dockerfile
+ file: ./internal/setup-smoke-test/Dockerfile
push: false
load: true
tags: smoke-test
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index caec4742e2..24ffaae4da 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -4,6 +4,10 @@ on:
schedule:
- cron: '30 1 * * *'
+permissions:
+ contents: read
+ issues: write
+
jobs:
stale:
runs-on: ubuntu-latest
diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml
index 27db6ebc7f..f56643f178 100644
--- a/.github/workflows/system-tests.yml
+++ b/.github/workflows/system-tests.yml
@@ -27,6 +27,9 @@ on:
schedule:
- cron: '00 04 * * 2-6'
+permissions:
+ contents: read
+
jobs:
system-tests:
if: github.event_name != 'pull_request' || (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'DataDog/dd-trace-go')
@@ -43,6 +46,9 @@ jobs:
- uds-echo
scenario:
- DEFAULT
+ - INTEGRATIONS
+ - INTEGRATIONS_AWS
+ - CROSSED_TRACING_LIBRARIES
- APPSEC_DISABLED
- APPSEC_BLOCKING
- APPSEC_BLOCKING_FULL_DENYLIST
@@ -56,7 +62,7 @@ jobs:
- weblog-variant: net-http
scenario: REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES
- weblog-variant: net-http
- scenario: REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES
+ scenario: REMOTE_CONFIG_MOCKED_BACKEND_LIVE_DEBUGGING
- weblog-variant: net-http
scenario: REMOTE_CONFIG_MOCKED_BACKEND_ASM_DD
# AppSec scenarios that don't depend on the integrations, so we just run on the net/http variant
@@ -103,6 +109,8 @@ jobs:
DD_API_KEY: ${{ secrets.DD_API_KEY }}
SYSTEM_TESTS_E2E_DD_API_KEY: ${{ secrets.SYSTEM_TESTS_E2E_DD_API_KEY }}
SYSTEM_TESTS_E2E_DD_APP_KEY: ${{ secrets.SYSTEM_TESTS_E2E_DD_APP_KEY }}
+ SYSTEM_TESTS_AWS_ACCESS_KEY_ID: ${{ secrets.SYSTEM_TESTS_IDM_AWS_ACCESS_KEY_ID }}
+ SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: ${{ secrets.SYSTEM_TESTS_IDM_AWS_SECRET_ACCESS_KEY }}
name: Test (${{ matrix.weblog-variant }}, ${{ matrix.scenario }})
steps:
- name: Checkout system tests
@@ -134,7 +142,7 @@ jobs:
run: tar -czvf artifact.tar.gz $(ls | grep logs)
- name: Upload artifact
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v4
if: ${{ always() }}
with:
name: logs_${{ matrix.weblog-variant }}_${{ matrix.scenario }}
diff --git a/.github/workflows/test-apps.cue b/.github/workflows/test-apps.cue
index 72e6953cef..1f45dea13b 100644
--- a/.github/workflows/test-apps.cue
+++ b/.github/workflows/test-apps.cue
@@ -115,6 +115,10 @@ env: {
DD_TAGS: "github_run_id:${{ github.run_id }} github_run_number:${{ github.run_number }} ${{ inputs['arg: tags'] }}",
}
+permissions: {
+ contents: "read",
+}
+
jobs: {
for i, scenario in #scenarios {
for j, env in #envs {
diff --git a/.github/workflows/test-apps.yml b/.github/workflows/test-apps.yml
index 95044a8564..bff3c60c53 100644
--- a/.github/workflows/test-apps.yml
+++ b/.github/workflows/test-apps.yml
@@ -64,6 +64,8 @@ name: Test Apps
env:
DD_ENV: github
DD_TAGS: 'github_run_id:${{ github.run_id }} github_run_number:${{ github.run_number }} ${{ inputs[''arg: tags''] }}'
+permissions:
+ contents: read
jobs:
job-0-0:
name: unit-of-work/v1 (prod)
diff --git a/.github/workflows/unit-integration-tests.yml b/.github/workflows/unit-integration-tests.yml
index 0eb78e3b7e..bc1faec295 100644
--- a/.github/workflows/unit-integration-tests.yml
+++ b/.github/workflows/unit-integration-tests.yml
@@ -13,6 +13,15 @@ on:
env:
DD_APPSEC_WAF_TIMEOUT: 1m # Increase time WAF time budget to reduce CI flakiness
+ # Users may build our library with GOTOOLCHAIN=local. If they do, and our
+ # go.mod file specifies a newer Go version than their local toolchain, their
+ # build will break. Run our tests with GOTOOLCHAIN=local to ensure that
+ # our library builds with all of the Go versions we claim to support,
+ # without having to download a newer one.
+ GOTOOLCHAIN: local
+
+permissions:
+ contents: read
jobs:
copyright:
@@ -22,7 +31,10 @@ jobs:
uses: actions/checkout@v3
with:
ref: ${{ inputs.ref || github.ref }}
-
+ - name: Setup go
+ uses: actions/setup-go@v5
+ with:
+ go-version: stable
- name: Copyright
run: |
go run checkcopyright.go
@@ -163,20 +175,24 @@ jobs:
image: memcached:1.5.9
ports:
- 11211:11211
- zookeeper:
- image: bitnami/zookeeper:latest
- env:
- ALLOW_ANONYMOUS_LOGIN: "yes"
- ports:
- - 2181:2181
kafka:
- image: darccio/kafka:2.13-2.8.1
+ image: confluentinc/confluent-local:7.5.0
env:
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
- KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
- KAFKA_CREATE_TOPICS: gotest:1:1,gosegtest:1:1
- KAFKA_BROKER_ID: 1
+ KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9094"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9093,BROKER://localhost:9092"
+ KAFKA_REST_BOOTSTRAP_SERVERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092"
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@localhost:9094"
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "BROKER"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
+ KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+ KAFKA_NODE_ID: "1"
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
ports:
- 9092:9092
localstack:
diff --git a/README.md b/README.md
index 070fb5720a..7cf2885d08 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
This repository contains Go packages for the client-side components of the Datadog product suite for Application Performance Monitoring, Continuous Profiling and Application Security Monitoring of Go applications.
-- [Datadog Application Performance Monitoring (APM)](https://docs.datadoghq.com/tracing/): Trace requests as they flow across web servers, databases and microservices so that developers have great visiblity into bottlenecks and troublesome requests.
+- [Datadog Application Performance Monitoring (APM)](https://docs.datadoghq.com/tracing/): Trace requests as they flow across web servers, databases and microservices so that developers have great visibility into bottlenecks and troublesome requests.
The package [`gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer`](https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer) allows you to trace any piece of your Go code, and commonly used Go libraries can be automatically traced thanks to our out-of-the-box integrations which can be found in the package [`gopkg.in/DataDog/dd-trace-go.v1/ddtrace/contrib`](https://pkg.go.dev/gopkg.in/DataDog/dd-trace-go.v1/contrib).
- [Datadog Go Continuous Profiler](https://docs.datadoghq.com/profiler/): Continuously profile your Go apps to find CPU, memory, and synchronization bottlenecks, broken down by function name, and line number, to significantly reduce end-user latency and infrastructure costs.
diff --git a/appsec/appsec.go b/appsec/appsec.go
index af8cd0aa5a..33286cdc17 100644
--- a/appsec/appsec.go
+++ b/appsec/appsec.go
@@ -19,7 +19,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
@@ -33,7 +33,7 @@ var appsecDisabledLog sync.Once
// Note that passing the raw bytes of the HTTP request body is not expected and would
// result in inaccurate attack detection.
// This function always returns nil when appsec is disabled.
-func MonitorParsedHTTPBody(ctx context.Context, body interface{}) error {
+func MonitorParsedHTTPBody(ctx context.Context, body any) error {
if !appsec.Enabled() {
appsecDisabledLog.Do(func() { log.Warn("appsec: not enabled. Body blocking checks won't be performed.") })
return nil
@@ -60,7 +60,15 @@ func SetUser(ctx context.Context, id string, opts ...tracer.UserMonitoringOption
appsecDisabledLog.Do(func() { log.Warn("appsec: not enabled. User blocking checks won't be performed.") })
return nil
}
- return sharedsec.MonitorUser(ctx, id)
+
+ op, errPtr := usersec.StartUserLoginOperation(ctx, usersec.UserLoginOperationArgs{})
+ op.Finish(usersec.UserLoginOperationRes{
+ UserID: id,
+ SessionID: getSessionID(opts...),
+ Success: true,
+ })
+
+ return *errPtr
}
// TrackUserLoginSuccessEvent sets a successful user login event, with the given
@@ -76,17 +84,7 @@ func SetUser(ctx context.Context, id string, opts ...tracer.UserMonitoringOption
// Take-Over (ATO) monitoring, ultimately blocking the IP address and/or user id
// associated to them.
func TrackUserLoginSuccessEvent(ctx context.Context, uid string, md map[string]string, opts ...tracer.UserMonitoringOption) error {
- span := getRootSpan(ctx)
- if span == nil {
- return nil
- }
-
- const tagPrefix = "appsec.events.users.login.success."
- span.SetTag(tagPrefix+"track", true)
- for k, v := range md {
- span.SetTag(tagPrefix+k, v)
- }
- span.SetTag(ext.SamplingPriority, ext.PriorityUserKeep)
+ TrackCustomEvent(ctx, "users.login.success", md)
return SetUser(ctx, uid, opts...)
}
@@ -106,14 +104,15 @@ func TrackUserLoginFailureEvent(ctx context.Context, uid string, exists bool, md
return
}
- const tagPrefix = "appsec.events.users.login.failure."
- span.SetTag(tagPrefix+"track", true)
- span.SetTag(tagPrefix+"usr.id", uid)
- span.SetTag(tagPrefix+"usr.exists", exists)
- for k, v := range md {
- span.SetTag(tagPrefix+k, v)
- }
- span.SetTag(ext.SamplingPriority, ext.PriorityUserKeep)
+ // We need to do the first call to SetTag ourselves because the map taken by TrackCustomEvent is map[string]string
+	// and not map[string]any, so the `exists` boolean variable does not fit in it
+ span.SetTag("appsec.events.users.login.failure.usr.exists", exists)
+ span.SetTag("appsec.events.users.login.failure.usr.id", uid)
+
+ TrackCustomEvent(ctx, "users.login.failure", md)
+
+ op, _ := usersec.StartUserLoginOperation(ctx, usersec.UserLoginOperationArgs{})
+ op.Finish(usersec.UserLoginOperationRes{UserID: uid, Success: false})
}
// TrackCustomEvent sets a custom event as service entry span tags. This span is
@@ -153,3 +152,13 @@ func getRootSpan(ctx context.Context) tracer.Span {
log.Error("appsec: could not access the root span")
return nil
}
+
+func getSessionID(opts ...tracer.UserMonitoringOption) string {
+ cfg := &tracer.UserMonitoringConfig{
+ Metadata: make(map[string]string),
+ }
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ return cfg.SessionID
+}
diff --git a/contrib/99designs/gqlgen/appsec_test.go b/contrib/99designs/gqlgen/appsec_test.go
index 4b7eefb8b1..5128530b10 100644
--- a/contrib/99designs/gqlgen/appsec_test.go
+++ b/contrib/99designs/gqlgen/appsec_test.go
@@ -58,7 +58,6 @@ func TestAppSec(t *testing.T) {
testCases := map[string]struct {
query string
variables map[string]any
- events map[string]string
}{
"basic": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`,
@@ -66,10 +65,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"with-default-parameter": {
query: fmt.Sprintf(`query TestQuery($topLevelId: String = %#v, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`, topLevelAttack),
@@ -77,10 +72,6 @@ func TestAppSec(t *testing.T) {
// "topLevelId" omitted (default value used)
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"embedded-variable": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) {
@@ -92,10 +83,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevelMapped)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
}
for name, tc := range testCases {
@@ -118,9 +105,9 @@ func TestAppSec(t *testing.T) {
require.NotEmpty(t, spans)
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
- require.Equal(t, 1, spans[len(spans)-1].Tag("_dd.appsec.enabled"))
+ span := spans[len(spans)-1]
+ require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
- events := make(map[string]string)
type ddAppsecJSON struct {
Triggers []struct {
Rule struct {
@@ -129,34 +116,19 @@ func TestAppSec(t *testing.T) {
} `json:"triggers"`
}
- // Search for AppSec events in the set of spans
- for _, span := range spans {
- jsonText, ok := span.Tag("_dd.appsec.json").(string)
- if !ok || jsonText == "" {
- continue
- }
- var parsed ddAppsecJSON
- err := json.Unmarshal([]byte(jsonText), &parsed)
- require.NoError(t, err)
+ jsonText, ok := span.Tag("_dd.appsec.json").(string)
+ require.True(t, ok, "expected _dd.appsec.json tag on span")
- require.Len(t, parsed.Triggers, 1, "expected exactly 1 trigger on %s span", span.OperationName())
- ruleID := parsed.Triggers[0].Rule.ID
- _, duplicate := events[ruleID]
- require.False(t, duplicate, "found duplicated hit for rule %s", ruleID)
- var origin string
- switch name := span.OperationName(); name {
- case "graphql.field":
- field := span.Tag(tagGraphqlField).(string)
- origin = fmt.Sprintf("%s(%s)", "graphql.resolve", field)
- case "graphql.query":
- origin = "graphql.execute"
- default:
- require.Fail(t, "rule trigger recorded on unecpected span", "rule %s recorded a hit on unexpected span %s", ruleID, name)
- }
- events[ruleID] = origin
+ var parsed ddAppsecJSON
+ err = json.Unmarshal([]byte(jsonText), &parsed)
+ require.NoError(t, err)
+
+ ids := make([]string, 0, len(parsed.Triggers))
+ for _, trigger := range parsed.Triggers {
+ ids = append(ids, trigger.Rule.ID)
}
- // Ensure they match the expected outcome
- require.Equal(t, tc.events, events)
+
+ require.ElementsMatch(t, ids, []string{"test-rule-001", "test-rule-002"})
})
}
})
diff --git a/contrib/99designs/gqlgen/tracer.go b/contrib/99designs/gqlgen/tracer.go
index d6341d2a96..ffdcb0c550 100644
--- a/contrib/99designs/gqlgen/tracer.go
+++ b/contrib/99designs/gqlgen/tracer.go
@@ -50,7 +50,6 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
"gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
@@ -104,12 +103,12 @@ func (t *gqlTracer) Validate(_ graphql.ExecutableSchema) error {
func (t *gqlTracer) InterceptOperation(ctx context.Context, next graphql.OperationHandler) graphql.ResponseHandler {
opCtx := graphql.GetOperationContext(ctx)
span, ctx := t.createRootSpan(ctx, opCtx)
- ctx, req := graphqlsec.StartRequestOperation(ctx, span, types.RequestOperationArgs{
+ ctx, req := graphqlsec.StartRequestOperation(ctx, graphqlsec.RequestOperationArgs{
RawQuery: opCtx.RawQuery,
OperationName: opCtx.OperationName,
Variables: opCtx.Variables,
})
- ctx, query := graphqlsec.StartExecutionOperation(ctx, span, types.ExecutionOperationArgs{
+ ctx, query := graphqlsec.StartExecutionOperation(ctx, graphqlsec.ExecutionOperationArgs{
Query: opCtx.RawQuery,
OperationName: opCtx.OperationName,
Variables: opCtx.Variables,
@@ -124,14 +123,21 @@ func (t *gqlTracer) InterceptOperation(ctx context.Context, next graphql.Operati
}
defer span.Finish(tracer.WithError(err))
}
- query.Finish(types.ExecutionOperationRes{
- Data: response.Data, // NB - This is raw data, but rather not parse it (possibly expensive).
- Error: response.Errors,
- })
- req.Finish(types.RequestOperationRes{
- Data: response.Data, // NB - This is raw data, but rather not parse it (possibly expensive).
- Error: response.Errors,
- })
+
+ var (
+ executionOperationRes graphqlsec.ExecutionOperationRes
+ requestOperationRes graphqlsec.RequestOperationRes
+ )
+ if response != nil {
+ executionOperationRes.Data = response.Data
+ executionOperationRes.Error = response.Errors
+
+ requestOperationRes.Data = response.Data
+ requestOperationRes.Error = response.Errors
+ }
+
+ query.Finish(executionOperationRes)
+ req.Finish(span, requestOperationRes)
return response
}
}
@@ -167,13 +173,13 @@ func (t *gqlTracer) InterceptField(ctx context.Context, next graphql.Resolver) (
span, ctx := tracer.StartSpanFromContext(ctx, fieldOp, opts...)
defer func() { span.Finish(tracer.WithError(err)) }()
- ctx, op := graphqlsec.StartResolveOperation(ctx, span, types.ResolveOperationArgs{
+ ctx, op := graphqlsec.StartResolveOperation(ctx, graphqlsec.ResolveOperationArgs{
Arguments: fieldCtx.Args,
TypeName: fieldCtx.Object,
FieldName: fieldCtx.Field.Name,
Trivial: isTrivial,
})
- defer func() { op.Finish(types.ResolveOperationRes{Data: res, Error: err}) }()
+ defer func() { op.Finish(graphqlsec.ResolveOperationRes{Data: res, Error: err}) }()
res, err = next(ctx)
return
diff --git a/contrib/99designs/gqlgen/tracer_test.go b/contrib/99designs/gqlgen/tracer_test.go
index 0517aef976..09e6eab628 100644
--- a/contrib/99designs/gqlgen/tracer_test.go
+++ b/contrib/99designs/gqlgen/tracer_test.go
@@ -261,3 +261,91 @@ func newTestClient(t *testing.T, h *testserver.TestServer, tracer graphql.Handle
h.Use(tracer)
return client.New(h)
}
+
+func TestInterceptOperation(t *testing.T) {
+ assertions := assert.New(t)
+ graphqlTestSrv := testserver.New()
+ c := newTestClient(t, graphqlTestSrv, NewTracer())
+
+ t.Run("intercept operation with graphQL Query", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ err := c.Post(`{ name }`, &testServerResponse{})
+ assertions.Nil(err)
+
+ allSpans := mt.FinishedSpans()
+ var root mocktracer.Span
+ var resNames []string
+ var opNames []string
+ for _, span := range allSpans {
+ if span.ParentID() == 0 {
+ root = span
+ }
+ resNames = append(resNames, span.Tag(ext.ResourceName).(string))
+ opNames = append(opNames, span.OperationName())
+ assertions.Equal("99designs/gqlgen", span.Tag(ext.Component))
+ }
+ assertions.ElementsMatch(resNames, []string{readOp, parsingOp, validationOp, "Query.name", `{ name }`})
+ assertions.ElementsMatch(opNames, []string{readOp, parsingOp, validationOp, fieldOp, "graphql.query"})
+ assertions.NotNil(root)
+ assertions.Nil(root.Tag(ext.Error))
+ })
+
+ t.Run("intercept operation with graphQL Mutation", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ err := c.Post(`mutation Name { name }`, &testServerResponse{})
+ // due to testserver.New() implementation, mutation is not supported
+ assertions.NotNil(err)
+
+ allSpans := mt.FinishedSpans()
+ var root mocktracer.Span
+ var resNames []string
+ var opNames []string
+ for _, span := range allSpans {
+ if span.ParentID() == 0 {
+ root = span
+ }
+ resNames = append(resNames, span.Tag(ext.ResourceName).(string))
+ opNames = append(opNames, span.OperationName())
+ assertions.Equal("99designs/gqlgen", span.Tag(ext.Component))
+ }
+ assertions.ElementsMatch(resNames, []string{readOp, parsingOp, validationOp, `mutation Name { name }`})
+ assertions.ElementsMatch(opNames, []string{readOp, parsingOp, validationOp, "graphql.mutation"})
+ assertions.NotNil(root)
+ assertions.NotNil(root.Tag(ext.Error))
+ })
+
+ t.Run("intercept operation with graphQL Subscription", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ go func() {
+ graphqlTestSrv.SendCompleteSubscriptionMessage()
+ }()
+
+		// using raw post because Post tries to access the nil response's Data field
+ resp, err := c.RawPost(`subscription Name { name }`)
+ assertions.Nil(err)
+ assertions.Nil(resp)
+
+ allSpans := mt.FinishedSpans()
+ var root mocktracer.Span
+ var resNames []string
+ var opNames []string
+ for _, span := range allSpans {
+ if span.ParentID() == 0 {
+ root = span
+ }
+ resNames = append(resNames, span.Tag(ext.ResourceName).(string))
+ opNames = append(opNames, span.OperationName())
+ assertions.Equal("99designs/gqlgen", span.Tag(ext.Component))
+ }
+ assertions.ElementsMatch(resNames, []string{`subscription Name { name }`, `subscription Name { name }`, "subscription Name { name }"})
+ assertions.ElementsMatch(opNames, []string{readOp, parsingOp, validationOp})
+ assertions.NotNil(root)
+ assertions.Nil(root.Tag(ext.Error))
+ })
+}
diff --git a/contrib/aws/aws-sdk-go-v2/aws/aws.go b/contrib/aws/aws-sdk-go-v2/aws/aws.go
index 7a1d0b4e6a..2a2bbf5c38 100644
--- a/contrib/aws/aws-sdk-go-v2/aws/aws.go
+++ b/contrib/aws/aws-sdk-go-v2/aws/aws.go
@@ -31,6 +31,10 @@ import (
"github.com/aws/aws-sdk-go-v2/service/sqs"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ eventBridgeTracer "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/internal/eventbridge"
+ snsTracer "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/internal/sns"
+ sqsTracer "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/internal/sqs"
)
const componentName = "aws/aws-sdk-go-v2/aws"
@@ -105,6 +109,16 @@ func (mw *traceMiddleware) startTraceMiddleware(stack *middleware.Stack) error {
}
span, spanctx := tracer.StartSpanFromContext(ctx, spanName(serviceID, operation), opts...)
+ // Inject trace context
+ switch serviceID {
+ case "SQS":
+ sqsTracer.EnrichOperation(span, in, operation)
+ case "SNS":
+ snsTracer.EnrichOperation(span, in, operation)
+ case "EventBridge":
+ eventBridgeTracer.EnrichOperation(span, in, operation)
+ }
+
// Handle initialize and continue through the middleware chain.
out, metadata, err = next.HandleInitialize(spanctx, in)
if err != nil && (mw.cfg.errCheck == nil || mw.cfg.errCheck(err)) {
@@ -229,21 +243,23 @@ func tableName(requestInput middleware.InitializeInput) string {
func streamName(requestInput middleware.InitializeInput) string {
switch params := requestInput.Parameters.(type) {
case *kinesis.PutRecordInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.PutRecordsInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.AddTagsToStreamInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.RemoveTagsFromStreamInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.CreateStreamInput:
- return *params.StreamName
+ if params.StreamName != nil {
+ return *params.StreamName
+ }
case *kinesis.DeleteStreamInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.DescribeStreamInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.DescribeStreamSummaryInput:
- return *params.StreamName
+ return coalesceNameOrArnResource(params.StreamName, params.StreamARN)
case *kinesis.GetRecordsInput:
if params.StreamARN != nil {
streamArnValue := *params.StreamARN
@@ -353,3 +369,16 @@ func serviceName(cfg *config, awsService string) string {
defaultName := fmt.Sprintf("aws.%s", awsService)
return namingschema.ServiceNameOverrideV0(defaultName, defaultName)
}
+
+func coalesceNameOrArnResource(name *string, arnVal *string) string {
+ if name != nil {
+ return *name
+ }
+
+ if arnVal != nil {
+ parts := strings.Split(*arnVal, "/")
+ return parts[len(parts)-1]
+ }
+
+ return ""
+}
diff --git a/contrib/aws/aws-sdk-go-v2/aws/aws_test.go b/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
index 3c45d1a1a7..09768a3f37 100644
--- a/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
+++ b/contrib/aws/aws-sdk-go-v2/aws/aws_test.go
@@ -8,6 +8,7 @@ package aws
import (
"context"
"encoding/base64"
+ "encoding/json"
"net/http"
"net/http/httptest"
"net/url"
@@ -24,12 +25,14 @@ import (
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ eventBridgeTypes "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
"github.com/aws/aws-sdk-go-v2/service/kinesis"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/sfn"
"github.com/aws/aws-sdk-go-v2/service/sns"
"github.com/aws/aws-sdk-go-v2/service/sqs"
- "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+ sqsTypes "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+ "github.com/aws/smithy-go/middleware"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -280,6 +283,66 @@ func TestAppendMiddlewareSqsReceiveMessage(t *testing.T) {
}
}
+func TestAppendMiddlewareSqsSendMessage(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ expectedStatusCode := 200
+ server := mockAWS(expectedStatusCode)
+ defer server.Close()
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: server.URL,
+ SigningRegion: "eu-west-1",
+ }, nil
+ })
+
+ awsCfg := aws.Config{
+ Region: "eu-west-1",
+ Credentials: aws.AnonymousCredentials{},
+ EndpointResolver: resolver,
+ }
+
+ AppendMiddleware(&awsCfg)
+
+ sqsClient := sqs.NewFromConfig(awsCfg)
+ sendMessageInput := &sqs.SendMessageInput{
+ MessageBody: aws.String("test message"),
+ QueueUrl: aws.String("https://sqs.us-west-2.amazonaws.com/123456789012/MyQueueName"),
+ }
+ _, err := sqsClient.SendMessage(context.Background(), sendMessageInput)
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+
+ s := spans[0]
+ assert.Equal(t, "SQS.request", s.OperationName())
+ assert.Equal(t, "SendMessage", s.Tag("aws.operation"))
+ assert.Equal(t, "SQS", s.Tag("aws.service"))
+ assert.Equal(t, "MyQueueName", s.Tag("queuename"))
+ assert.Equal(t, "SQS.SendMessage", s.Tag(ext.ResourceName))
+ assert.Equal(t, "aws.SQS", s.Tag(ext.ServiceName))
+
+ // Check for trace context injection
+ assert.NotNil(t, sendMessageInput.MessageAttributes)
+ assert.Contains(t, sendMessageInput.MessageAttributes, "_datadog")
+ ddAttr := sendMessageInput.MessageAttributes["_datadog"]
+ assert.Equal(t, "String", *ddAttr.DataType)
+ assert.NotEmpty(t, *ddAttr.StringValue)
+
+ // Decode and verify the injected trace context
+ var traceContext map[string]string
+ err = json.Unmarshal([]byte(*ddAttr.StringValue), &traceContext)
+ assert.NoError(t, err)
+ assert.Contains(t, traceContext, "x-datadog-trace-id")
+ assert.Contains(t, traceContext, "x-datadog-parent-id")
+ assert.NotEmpty(t, traceContext["x-datadog-trace-id"])
+ assert.NotEmpty(t, traceContext["x-datadog-parent-id"])
+}
+
func TestAppendMiddlewareS3ListObjects(t *testing.T) {
tests := []struct {
name string
@@ -440,6 +503,22 @@ func TestAppendMiddlewareSnsPublish(t *testing.T) {
assert.Equal(t, server.URL+"/", s.Tag(ext.HTTPURL))
assert.Equal(t, "aws/aws-sdk-go-v2/aws", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindClient, s.Tag(ext.SpanKind))
+
+ // Check for trace context injection
+ assert.NotNil(t, tt.publishInput.MessageAttributes)
+ assert.Contains(t, tt.publishInput.MessageAttributes, "_datadog")
+ ddAttr := tt.publishInput.MessageAttributes["_datadog"]
+ assert.Equal(t, "Binary", *ddAttr.DataType)
+ assert.NotEmpty(t, ddAttr.BinaryValue)
+
+ // Decode and verify the injected trace context
+ var traceContext map[string]string
+ err := json.Unmarshal(ddAttr.BinaryValue, &traceContext)
+ assert.NoError(t, err)
+ assert.Contains(t, traceContext, "x-datadog-trace-id")
+ assert.Contains(t, traceContext, "x-datadog-parent-id")
+ assert.NotEmpty(t, traceContext["x-datadog-trace-id"])
+ assert.NotEmpty(t, traceContext["x-datadog-parent-id"])
})
}
}
@@ -656,6 +735,62 @@ func TestAppendMiddlewareEventBridgePutRule(t *testing.T) {
}
}
+func TestAppendMiddlewareEventBridgePutEvents(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ expectedStatusCode := 200
+ server := mockAWS(expectedStatusCode)
+ defer server.Close()
+
+ resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ return aws.Endpoint{
+ PartitionID: "aws",
+ URL: server.URL,
+ SigningRegion: "eu-west-1",
+ }, nil
+ })
+
+ awsCfg := aws.Config{
+ Region: "eu-west-1",
+ Credentials: aws.AnonymousCredentials{},
+ EndpointResolver: resolver,
+ }
+
+ AppendMiddleware(&awsCfg)
+
+ eventbridgeClient := eventbridge.NewFromConfig(awsCfg)
+ putEventsInput := &eventbridge.PutEventsInput{
+ Entries: []eventBridgeTypes.PutEventsRequestEntry{
+ {
+ EventBusName: aws.String("my-event-bus"),
+ Detail: aws.String(`{"key": "value"}`),
+ },
+ },
+ }
+ eventbridgeClient.PutEvents(context.Background(), putEventsInput)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+
+ s := spans[0]
+ assert.Equal(t, "PutEvents", s.Tag("aws.operation"))
+ assert.Equal(t, "EventBridge.PutEvents", s.Tag(ext.ResourceName))
+
+ // Check for trace context injection
+ assert.Len(t, putEventsInput.Entries, 1)
+ entry := putEventsInput.Entries[0]
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ assert.NoError(t, err)
+ assert.Contains(t, detail, "_datadog")
+ ddData, ok := detail["_datadog"].(map[string]interface{})
+ assert.True(t, ok)
+ assert.Contains(t, ddData, "x-datadog-start-time")
+ assert.Contains(t, ddData, "x-datadog-resource-name")
+ assert.Equal(t, "my-event-bus", ddData["x-datadog-resource-name"])
+}
+
func TestAppendMiddlewareSfnDescribeStateMachine(t *testing.T) {
tests := []struct {
name string
@@ -970,8 +1105,8 @@ func TestMessagingNamingSchema(t *testing.T) {
_, err = sqsClient.SendMessage(ctx, msg)
require.NoError(t, err)
- entry := types.SendMessageBatchRequestEntry{Id: aws.String("1"), MessageBody: aws.String("body")}
- batchMsg := &sqs.SendMessageBatchInput{QueueUrl: sqsResp.QueueUrl, Entries: []types.SendMessageBatchRequestEntry{entry}}
+ entry := sqsTypes.SendMessageBatchRequestEntry{Id: aws.String("1"), MessageBody: aws.String("body")}
+ batchMsg := &sqs.SendMessageBatchInput{QueueUrl: sqsResp.QueueUrl, Entries: []sqsTypes.SendMessageBatchRequestEntry{entry}}
_, err = sqsClient.SendMessageBatch(ctx, batchMsg)
require.NoError(t, err)
@@ -1077,3 +1212,64 @@ func TestWithErrorCheck(t *testing.T) {
})
}
}
+
+func TestStreamName(t *testing.T) {
+ dummyName := `my-stream`
+ dummyArn := `arn:aws:kinesis:us-east-1:111111111111:stream/` + dummyName
+
+ tests := []struct {
+ name string
+ input any
+ expected string
+ }{
+ {
+ name: "PutRecords with ARN",
+ input: &kinesis.PutRecordsInput{StreamARN: &dummyArn},
+ expected: dummyName,
+ },
+ {
+ name: "PutRecords with Name",
+ input: &kinesis.PutRecordsInput{StreamName: &dummyName},
+ expected: dummyName,
+ },
+ {
+ name: "PutRecords with both",
+ input: &kinesis.PutRecordsInput{StreamName: &dummyName, StreamARN: &dummyArn},
+ expected: dummyName,
+ },
+ {
+ name: "PutRecord with Name",
+ input: &kinesis.PutRecordInput{StreamName: &dummyName},
+ expected: dummyName,
+ },
+ {
+ name: "CreateStream",
+ input: &kinesis.CreateStreamInput{StreamName: &dummyName},
+ expected: dummyName,
+ },
+ {
+ name: "CreateStream with nothing",
+ input: &kinesis.CreateStreamInput{},
+ expected: "",
+ },
+ {
+ name: "GetRecords",
+ input: &kinesis.GetRecordsInput{StreamARN: &dummyArn},
+ expected: dummyName,
+ },
+ {
+ name: "GetRecords with nothing",
+ input: &kinesis.GetRecordsInput{},
+ expected: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ req := middleware.InitializeInput{
+ Parameters: tt.input,
+ }
+ val := streamName(req)
+ assert.Equal(t, tt.expected, val)
+ })
+ }
+}
diff --git a/contrib/aws/internal/eventbridge/eventbridge.go b/contrib/aws/internal/eventbridge/eventbridge.go
new file mode 100644
index 0000000000..5a2a56068e
--- /dev/null
+++ b/contrib/aws/internal/eventbridge/eventbridge.go
@@ -0,0 +1,112 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package eventbridge
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
+ "github.com/aws/smithy-go/middleware"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "strconv"
+ "time"
+)
+
+const (
+ datadogKey = "_datadog"
+ startTimeKey = "x-datadog-start-time"
+ resourceNameKey = "x-datadog-resource-name"
+ maxSizeBytes = 256 * 1024 // 256 KB
+)
+
+func EnrichOperation(span tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "PutEvents":
+ handlePutEvents(span, in)
+ }
+}
+
+func handlePutEvents(span tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*eventbridge.PutEventsInput)
+ if !ok {
+ log.Debug("Unable to read PutEvents params")
+ return
+ }
+
+ // Create trace context
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ log.Debug("Unable to inject trace context: %s", err)
+ return
+ }
+
+ // Add start time
+ startTimeMillis := time.Now().UnixMilli()
+ carrier[startTimeKey] = strconv.FormatInt(startTimeMillis, 10)
+
+ carrierJSON, err := json.Marshal(carrier)
+ if err != nil {
+ log.Debug("Unable to marshal trace context: %s", err)
+ return
+ }
+
+ // Remove last '}'
+ reusedTraceContext := string(carrierJSON[:len(carrierJSON)-1])
+
+ for i := range params.Entries {
+ injectTraceContext(reusedTraceContext, &params.Entries[i])
+ }
+}
+
+func injectTraceContext(baseTraceContext string, entryPtr *types.PutEventsRequestEntry) {
+ if entryPtr == nil {
+ return
+ }
+
+ // Build the complete trace context
+ var traceContext string
+ if entryPtr.EventBusName != nil {
+ traceContext = fmt.Sprintf(`%s,"%s":"%s"}`, baseTraceContext, resourceNameKey, *entryPtr.EventBusName)
+ } else {
+ traceContext = baseTraceContext + "}"
+ }
+
+ // Get current detail string
+ var detail string
+ if entryPtr.Detail == nil || *entryPtr.Detail == "" {
+ detail = "{}"
+ } else {
+ detail = *entryPtr.Detail
+ }
+
+ // Basic JSON structure validation
+ if len(detail) < 2 || detail[len(detail)-1] != '}' {
+ log.Debug("Unable to parse detail JSON. Not injecting trace context into EventBridge payload.")
+ return
+ }
+
+ // Create new detail string
+ var newDetail string
+ if len(detail) > 2 {
+ // Case where detail is not empty
+ newDetail = fmt.Sprintf(`%s,"%s":%s}`, detail[:len(detail)-1], datadogKey, traceContext)
+ } else {
+ // Case where detail is empty
+ newDetail = fmt.Sprintf(`{"%s":%s}`, datadogKey, traceContext)
+ }
+
+ // Check sizes
+ if len(newDetail) > maxSizeBytes {
+ log.Debug("Payload size too large to pass context")
+ return
+ }
+
+ entryPtr.Detail = aws.String(newDetail)
+}
diff --git a/contrib/aws/internal/eventbridge/eventbridge_test.go b/contrib/aws/internal/eventbridge/eventbridge_test.go
new file mode 100644
index 0000000000..77c9ab1e72
--- /dev/null
+++ b/contrib/aws/internal/eventbridge/eventbridge_test.go
@@ -0,0 +1,192 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package eventbridge
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge"
+ "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "strings"
+ "testing"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ input := middleware.InitializeInput{
+ Parameters: &eventbridge.PutEventsInput{
+ Entries: []types.PutEventsRequestEntry{
+ {
+ Detail: aws.String(`{"@123": "value", "_foo": "bar"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ {
+ Detail: aws.String(`{"@123": "data", "_foo": "bar"}`),
+ EventBusName: aws.String("test-bus-2"),
+ },
+ },
+ },
+ }
+
+ EnrichOperation(span, input, "PutEvents")
+
+ params, ok := input.Parameters.(*eventbridge.PutEventsInput)
+ require.True(t, ok)
+ require.Len(t, params.Entries, 2)
+
+ for _, entry := range params.Entries {
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+
+ assert.Contains(t, detail, "@123") // make sure user data still exists
+ assert.Contains(t, detail, "_foo")
+ assert.Contains(t, detail, datadogKey)
+ ddData, ok := detail[datadogKey].(map[string]interface{})
+ require.True(t, ok)
+
+ assert.Contains(t, ddData, startTimeKey)
+ assert.Contains(t, ddData, resourceNameKey)
+ assert.Equal(t, *entry.EventBusName, ddData[resourceNameKey])
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ baseTraceContext := fmt.Sprintf(`{"x-datadog-trace-id":"%d","x-datadog-parent-id":"%d","x-datadog-start-time":"123456789"`, span.Context().TraceID(), span.Context().SpanID())
+
+ tests := []struct {
+ name string
+ entry types.PutEventsRequestEntry
+ expected func(*testing.T, *types.PutEventsRequestEntry)
+ }{
+ {
+ name: "Inject into empty detail",
+ entry: types.PutEventsRequestEntry{
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.NotNil(t, entry.Detail)
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, datadogKey)
+ },
+ },
+ {
+ name: "Inject into existing detail",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"existing": "data"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, "existing")
+ assert.Equal(t, "data", detail["existing"])
+ assert.Contains(t, detail, datadogKey)
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ injectTraceContext(baseTraceContext, &tt.entry)
+ tt.expected(t, &tt.entry)
+
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*tt.entry.Detail), &detail)
+ require.NoError(t, err)
+
+ ddData := detail[datadogKey].(map[string]interface{})
+ assert.Contains(t, ddData, startTimeKey)
+ assert.Contains(t, ddData, resourceNameKey)
+ assert.Equal(t, *tt.entry.EventBusName, ddData[resourceNameKey])
+
+ // Check that start time exists and is not empty
+ startTime, ok := ddData[startTimeKey]
+ assert.True(t, ok)
+ assert.Equal(t, startTime, "123456789")
+
+ carrier := tracer.TextMapCarrier{}
+ for k, v := range ddData {
+ if s, ok := v.(string); ok {
+ carrier[k] = s
+ }
+ }
+
+ extractedSpanContext, err := tracer.Extract(&carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ })
+ }
+}
+
+func TestInjectTraceContextSizeLimit(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ baseTraceContext := `{"x-datadog-trace-id":"12345","x-datadog-parent-id":"67890","x-datadog-start-time":"123456789"`
+
+ tests := []struct {
+ name string
+ entry types.PutEventsRequestEntry
+ expected func(*testing.T, *types.PutEventsRequestEntry)
+ }{
+ {
+ name: "Do not inject when payload is too large",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"large": "` + strings.Repeat("a", maxSizeBytes-50) + `"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.GreaterOrEqual(t, len(*entry.Detail), maxSizeBytes-50)
+ assert.NotContains(t, *entry.Detail, datadogKey)
+ assert.True(t, strings.HasPrefix(*entry.Detail, `{"large": "`))
+ assert.True(t, strings.HasSuffix(*entry.Detail, `"}`))
+ },
+ },
+ {
+ name: "Inject when payload is just under the limit",
+ entry: types.PutEventsRequestEntry{
+ Detail: aws.String(`{"large": "` + strings.Repeat("a", maxSizeBytes-1000) + `"}`),
+ EventBusName: aws.String("test-bus"),
+ },
+ expected: func(t *testing.T, entry *types.PutEventsRequestEntry) {
+ assert.Less(t, len(*entry.Detail), maxSizeBytes)
+ var detail map[string]interface{}
+ err := json.Unmarshal([]byte(*entry.Detail), &detail)
+ require.NoError(t, err)
+ assert.Contains(t, detail, datadogKey)
+ assert.Contains(t, detail, "large")
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ injectTraceContext(baseTraceContext, &tt.entry)
+ tt.expected(t, &tt.entry)
+ })
+ }
+}
diff --git a/contrib/aws/internal/sns/sns.go b/contrib/aws/internal/sns/sns.go
new file mode 100644
index 0000000000..b40ca5ea85
--- /dev/null
+++ b/contrib/aws/internal/sns/sns.go
@@ -0,0 +1,105 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sns
+
+import (
+ "encoding/json"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sns"
+ "github.com/aws/aws-sdk-go-v2/service/sns/types"
+ "github.com/aws/smithy-go/middleware"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+const (
+ datadogKey = "_datadog"
+ maxMessageAttributes = 10
+)
+
+func EnrichOperation(span tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "Publish":
+ handlePublish(span, in)
+ case "PublishBatch":
+ handlePublishBatch(span, in)
+ }
+}
+
+func handlePublish(span tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishInput)
+ if !ok {
+ log.Debug("Unable to read PublishInput params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ if params.MessageAttributes == nil {
+ params.MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+
+ injectTraceContext(traceContext, params.MessageAttributes)
+}
+
+func handlePublishBatch(span tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishBatchInput)
+ if !ok {
+ log.Debug("Unable to read PublishBatch params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ for i := range params.PublishBatchRequestEntries {
+ if params.PublishBatchRequestEntries[i].MessageAttributes == nil {
+ params.PublishBatchRequestEntries[i].MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+ injectTraceContext(traceContext, params.PublishBatchRequestEntries[i].MessageAttributes)
+ }
+}
+
+func getTraceContext(span tracer.Span) (types.MessageAttributeValue, error) {
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ jsonBytes, err := json.Marshal(carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ // Use Binary since SNS subscription filter policies fail silently with JSON
+ // strings. https://github.com/DataDog/datadog-lambda-js/pull/269
+ attribute := types.MessageAttributeValue{
+ DataType: aws.String("Binary"),
+ BinaryValue: jsonBytes,
+ }
+
+ return attribute, nil
+}
+
+func injectTraceContext(traceContext types.MessageAttributeValue, messageAttributes map[string]types.MessageAttributeValue) {
+ // SNS only allows a maximum of 10 message attributes.
+ // https://docs.aws.amazon.com/sns/latest/dg/sns-message-attributes.html
+ // Only inject if there's room.
+ if len(messageAttributes) >= maxMessageAttributes {
+ log.Info("Cannot inject trace context: message already has maximum allowed attributes")
+ return
+ }
+
+ messageAttributes[datadogKey] = traceContext
+}
diff --git a/contrib/aws/internal/sns/sns_test.go b/contrib/aws/internal/sns/sns_test.go
new file mode 100644
index 0000000000..0f955680f0
--- /dev/null
+++ b/contrib/aws/internal/sns/sns_test.go
@@ -0,0 +1,177 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sns
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sns"
+ "github.com/aws/aws-sdk-go-v2/service/sns/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ tests := []struct {
+ name string
+ operation string
+ input middleware.InitializeInput
+ setup func(context.Context) tracer.Span
+ check func(*testing.T, middleware.InitializeInput)
+ }{
+ {
+ name: "Publish",
+ operation: "Publish",
+ input: middleware.InitializeInput{
+ Parameters: &sns.PublishInput{
+ Message: aws.String("test message"),
+ TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:test-topic"),
+ },
+ },
+ setup: func(ctx context.Context) tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.MessageAttributes)
+ assert.Contains(t, params.MessageAttributes, datadogKey)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *params.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, params.MessageAttributes[datadogKey].BinaryValue)
+ },
+ },
+ {
+ name: "PublishBatch",
+ operation: "PublishBatch",
+ input: middleware.InitializeInput{
+ Parameters: &sns.PublishBatchInput{
+ TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:test-topic"),
+ PublishBatchRequestEntries: []types.PublishBatchRequestEntry{
+ {
+ Id: aws.String("1"),
+ Message: aws.String("test message 1"),
+ },
+ {
+ Id: aws.String("2"),
+ Message: aws.String("test message 2"),
+ },
+ },
+ },
+ },
+ setup: func(ctx context.Context) tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sns.PublishBatchInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.PublishBatchRequestEntries)
+ require.Len(t, params.PublishBatchRequestEntries, 2)
+
+ for _, entry := range params.PublishBatchRequestEntries {
+ require.NotNil(t, entry.MessageAttributes)
+ assert.Contains(t, entry.MessageAttributes, datadogKey)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *entry.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, entry.MessageAttributes[datadogKey].BinaryValue)
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span := tt.setup(ctx)
+
+ EnrichOperation(span, tt.input, tt.operation)
+
+ if tt.check != nil {
+ tt.check(t, tt.input)
+ }
+ })
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ tests := []struct {
+ name string
+ existingAttributes int
+ expectInjection bool
+ }{
+ {
+ name: "Inject with no existing attributes",
+ existingAttributes: 0,
+ expectInjection: true,
+ },
+ {
+ name: "Inject with some existing attributes",
+ existingAttributes: 5,
+ expectInjection: true,
+ },
+ {
+ name: "No injection when at max attributes",
+ existingAttributes: maxMessageAttributes,
+ expectInjection: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ messageAttributes := make(map[string]types.MessageAttributeValue)
+ for i := 0; i < tt.existingAttributes; i++ {
+ messageAttributes[fmt.Sprintf("attr%d", i)] = types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String("value"),
+ }
+ }
+
+ traceContext, err := getTraceContext(span)
+ assert.NoError(t, err)
+ injectTraceContext(traceContext, messageAttributes)
+
+ if tt.expectInjection {
+ assert.Contains(t, messageAttributes, datadogKey)
+ assert.NotNil(t, messageAttributes[datadogKey].DataType)
+ assert.Equal(t, "Binary", *messageAttributes[datadogKey].DataType)
+ assert.NotNil(t, messageAttributes[datadogKey].BinaryValue)
+ assert.NotEmpty(t, messageAttributes[datadogKey].BinaryValue)
+
+ carrier := tracer.TextMapCarrier{}
+ err := json.Unmarshal(messageAttributes[datadogKey].BinaryValue, &carrier)
+ assert.NoError(t, err)
+
+ extractedSpanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ } else {
+ assert.NotContains(t, messageAttributes, datadogKey)
+ }
+ })
+ }
+}
diff --git a/contrib/aws/internal/sqs/sqs.go b/contrib/aws/internal/sqs/sqs.go
new file mode 100644
index 0000000000..9fbd8a9f90
--- /dev/null
+++ b/contrib/aws/internal/sqs/sqs.go
@@ -0,0 +1,103 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sqs
+
+import (
+ "encoding/json"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+ "github.com/aws/smithy-go/middleware"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+const (
+ datadogKey = "_datadog"
+ maxMessageAttributes = 10
+)
+
+func EnrichOperation(span tracer.Span, in middleware.InitializeInput, operation string) {
+ switch operation {
+ case "SendMessage":
+ handleSendMessage(span, in)
+ case "SendMessageBatch":
+ handleSendMessageBatch(span, in)
+ }
+}
+
+func handleSendMessage(span tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageInput)
+ if !ok {
+ log.Debug("Unable to read SendMessage params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ if params.MessageAttributes == nil {
+ params.MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+
+ injectTraceContext(traceContext, params.MessageAttributes)
+}
+
+func handleSendMessageBatch(span tracer.Span, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageBatchInput)
+ if !ok {
+ log.Debug("Unable to read SendMessageBatch params")
+ return
+ }
+
+ traceContext, err := getTraceContext(span)
+ if err != nil {
+ log.Debug("Unable to get trace context: %s", err.Error())
+ return
+ }
+
+ for i := range params.Entries {
+ if params.Entries[i].MessageAttributes == nil {
+ params.Entries[i].MessageAttributes = make(map[string]types.MessageAttributeValue)
+ }
+ injectTraceContext(traceContext, params.Entries[i].MessageAttributes)
+ }
+}
+
+func getTraceContext(span tracer.Span) (types.MessageAttributeValue, error) {
+ carrier := tracer.TextMapCarrier{}
+ err := tracer.Inject(span.Context(), carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ jsonBytes, err := json.Marshal(carrier)
+ if err != nil {
+ return types.MessageAttributeValue{}, err
+ }
+
+ attribute := types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String(string(jsonBytes)),
+ }
+
+ return attribute, nil
+}
+
+func injectTraceContext(traceContext types.MessageAttributeValue, messageAttributes map[string]types.MessageAttributeValue) {
+ // SQS only allows a maximum of 10 message attributes.
+ // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes
+ // Only inject if there's room.
+ if len(messageAttributes) >= maxMessageAttributes {
+ log.Info("Cannot inject trace context: message already has maximum allowed attributes")
+ return
+ }
+
+ messageAttributes[datadogKey] = traceContext
+}
diff --git a/contrib/aws/internal/sqs/sqs_test.go b/contrib/aws/internal/sqs/sqs_test.go
new file mode 100644
index 0000000000..1a66adab09
--- /dev/null
+++ b/contrib/aws/internal/sqs/sqs_test.go
@@ -0,0 +1,181 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package sqs
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/sqs/types"
+ "testing"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sqs"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestEnrichOperation(t *testing.T) {
+ tests := []struct {
+ name string
+ operation string
+ input middleware.InitializeInput
+ setup func(context.Context) tracer.Span
+ check func(*testing.T, middleware.InitializeInput)
+ }{
+ {
+ name: "SendMessage",
+ operation: "SendMessage",
+ input: middleware.InitializeInput{
+ Parameters: &sqs.SendMessageInput{
+ MessageBody: aws.String("test message"),
+ QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/1234567890/test-queue"),
+ },
+ },
+ setup: func(ctx context.Context) tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.MessageAttributes)
+ assert.Contains(t, params.MessageAttributes, datadogKey)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *params.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, params.MessageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *params.MessageAttributes[datadogKey].StringValue)
+ },
+ },
+ {
+ name: "SendMessageBatch",
+ operation: "SendMessageBatch",
+ input: middleware.InitializeInput{
+ Parameters: &sqs.SendMessageBatchInput{
+ QueueUrl: aws.String("https://sqs.us-east-1.amazonaws.com/1234567890/test-queue"),
+ Entries: []types.SendMessageBatchRequestEntry{
+ {
+ Id: aws.String("1"),
+ MessageBody: aws.String("test message 1"),
+ },
+ {
+ Id: aws.String("2"),
+ MessageBody: aws.String("test message 2"),
+ },
+ {
+ Id: aws.String("3"),
+ MessageBody: aws.String("test message 3"),
+ },
+ },
+ },
+ },
+ setup: func(ctx context.Context) tracer.Span {
+ span, _ := tracer.StartSpanFromContext(ctx, "test-span")
+ return span
+ },
+ check: func(t *testing.T, in middleware.InitializeInput) {
+ params, ok := in.Parameters.(*sqs.SendMessageBatchInput)
+ require.True(t, ok)
+ require.NotNil(t, params)
+ require.NotNil(t, params.Entries)
+ require.Len(t, params.Entries, 3)
+
+ for _, entry := range params.Entries {
+ require.NotNil(t, entry.MessageAttributes)
+ assert.Contains(t, entry.MessageAttributes, datadogKey)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *entry.MessageAttributes[datadogKey].DataType)
+ assert.NotNil(t, entry.MessageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *entry.MessageAttributes[datadogKey].StringValue)
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span := tt.setup(ctx)
+
+ EnrichOperation(span, tt.input, tt.operation)
+
+ if tt.check != nil {
+ tt.check(t, tt.input)
+ }
+ })
+ }
+}
+
+func TestInjectTraceContext(t *testing.T) {
+ tests := []struct {
+ name string
+ existingAttributes int
+ expectInjection bool
+ }{
+ {
+ name: "Inject with no existing attributes",
+ existingAttributes: 0,
+ expectInjection: true,
+ },
+ {
+ name: "Inject with some existing attributes",
+ existingAttributes: 5,
+ expectInjection: true,
+ },
+ {
+ name: "No injection when at max attributes",
+ existingAttributes: maxMessageAttributes,
+ expectInjection: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ span := tracer.StartSpan("test-span")
+
+ messageAttributes := make(map[string]types.MessageAttributeValue)
+ for i := 0; i < tt.existingAttributes; i++ {
+ messageAttributes[fmt.Sprintf("attr%d", i)] = types.MessageAttributeValue{
+ DataType: aws.String("String"),
+ StringValue: aws.String("value"),
+ }
+ }
+
+ traceContext, err := getTraceContext(span)
+ assert.NoError(t, err)
+ injectTraceContext(traceContext, messageAttributes)
+
+ if tt.expectInjection {
+ assert.Contains(t, messageAttributes, datadogKey)
+ assert.NotNil(t, messageAttributes[datadogKey].DataType)
+ assert.Equal(t, "String", *messageAttributes[datadogKey].DataType)
+ assert.NotNil(t, messageAttributes[datadogKey].StringValue)
+ assert.NotEmpty(t, *messageAttributes[datadogKey].StringValue)
+
+ carrier := tracer.TextMapCarrier{}
+ err := json.Unmarshal([]byte(*messageAttributes[datadogKey].StringValue), &carrier)
+ assert.NoError(t, err)
+
+ extractedSpanContext, err := tracer.Extract(carrier)
+ assert.NoError(t, err)
+ assert.Equal(t, span.Context().TraceID(), extractedSpanContext.TraceID())
+ assert.Equal(t, span.Context().SpanID(), extractedSpanContext.SpanID())
+ } else {
+ assert.NotContains(t, messageAttributes, datadogKey)
+ }
+ })
+ }
+}
diff --git a/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/config.go b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/config.go
new file mode 100644
index 0000000000..b5b04b19a8
--- /dev/null
+++ b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/config.go
@@ -0,0 +1,43 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+)
+
+type config struct {
+ serviceName string
+ publishSpanName string
+ receiveSpanName string
+ measured bool
+}
+
+func defaultConfig() *config {
+ return &config{
+ serviceName: namingschema.ServiceNameOverrideV0("", ""),
+ publishSpanName: namingschema.OpName(namingschema.GCPPubSubOutbound),
+ receiveSpanName: namingschema.OpName(namingschema.GCPPubSubInbound),
+ measured: false,
+ }
+}
+
+// Option is used to customize spans started by TraceReceiveFunc or TracePublish.
+type Option func(cfg *config)
+
+// WithServiceName sets the service name tag for traces started by TraceReceiveFunc or TracePublish.
+func WithServiceName(serviceName string) Option {
+ return func(cfg *config) {
+ cfg.serviceName = serviceName
+ }
+}
+
+// WithMeasured sets the measured tag for traces started by TraceReceiveFunc or TracePublish.
+func WithMeasured() Option {
+ return func(cfg *config) {
+ cfg.measured = true
+ }
+}
diff --git a/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go
new file mode 100644
index 0000000000..43633fbf48
--- /dev/null
+++ b/contrib/cloud.google.com/go/pubsub.v1/internal/tracing/tracing.go
@@ -0,0 +1,127 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+// Package tracing contains tracing logic for the cloud.google.com/go/pubsub.v1 instrumentation.
+//
+// WARNING: this package SHOULD NOT import cloud.google.com/go/pubsub.
+//
+// The motivation of this package is to support orchestrion, which cannot use the main package because it imports
+// the cloud.google.com/go/pubsub package, and since orchestrion modifies the library code itself,
+// this would cause an import cycle.
+package tracing
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
+)
+
+const componentName = "cloud.google.com/go/pubsub.v1"
+
+func init() {
+ telemetry.LoadIntegration(componentName)
+ tracer.MarkIntegrationImported(componentName)
+}
+
+type Message struct {
+ ID string
+ Data []byte
+ OrderingKey string
+ Attributes map[string]string
+ DeliveryAttempt *int
+ PublishTime time.Time
+}
+
+type Topic interface {
+ String() string
+}
+
+type Subscription interface {
+ String() string
+}
+
+func TracePublish(ctx context.Context, topic Topic, msg *Message, opts ...Option) (context.Context, func(serverID string, err error)) {
+ cfg := defaultConfig()
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ spanOpts := []ddtrace.StartSpanOption{
+ tracer.ResourceName(topic.String()),
+ tracer.SpanType(ext.SpanTypeMessageProducer),
+ tracer.Tag("message_size", len(msg.Data)),
+ tracer.Tag("ordering_key", msg.OrderingKey),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemGCPPubsub),
+ }
+ if cfg.serviceName != "" {
+ spanOpts = append(spanOpts, tracer.ServiceName(cfg.serviceName))
+ }
+ if cfg.measured {
+ spanOpts = append(spanOpts, tracer.Measured())
+ }
+ span, ctx := tracer.StartSpanFromContext(
+ ctx,
+ cfg.publishSpanName,
+ spanOpts...,
+ )
+ if msg.Attributes == nil {
+ msg.Attributes = make(map[string]string)
+ }
+ if err := tracer.Inject(span.Context(), tracer.TextMapCarrier(msg.Attributes)); err != nil {
+ log.Debug("contrib/cloud.google.com/go/pubsub.v1/trace: failed injecting tracing attributes: %v", err)
+ }
+ span.SetTag("num_attributes", len(msg.Attributes))
+
+ var once sync.Once
+ closeSpan := func(serverID string, err error) {
+ once.Do(func() {
+ span.SetTag("server_id", serverID)
+ span.Finish(tracer.WithError(err))
+ })
+ }
+ return ctx, closeSpan
+}
+
+func TraceReceiveFunc(s Subscription, opts ...Option) func(ctx context.Context, msg *Message) (context.Context, func()) {
+ cfg := defaultConfig()
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ log.Debug("contrib/cloud.google.com/go/pubsub.v1/trace: Wrapping Receive Handler: %#v", cfg)
+ return func(ctx context.Context, msg *Message) (context.Context, func()) {
+ parentSpanCtx, _ := tracer.Extract(tracer.TextMapCarrier(msg.Attributes))
+ opts := []ddtrace.StartSpanOption{
+ tracer.ResourceName(s.String()),
+ tracer.SpanType(ext.SpanTypeMessageConsumer),
+ tracer.Tag("message_size", len(msg.Data)),
+ tracer.Tag("num_attributes", len(msg.Attributes)),
+ tracer.Tag("ordering_key", msg.OrderingKey),
+ tracer.Tag("message_id", msg.ID),
+ tracer.Tag("publish_time", msg.PublishTime.String()),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemGCPPubsub),
+ tracer.ChildOf(parentSpanCtx),
+ }
+ if cfg.serviceName != "" {
+ opts = append(opts, tracer.ServiceName(cfg.serviceName))
+ }
+ if cfg.measured {
+ opts = append(opts, tracer.Measured())
+ }
+ span, ctx := tracer.StartSpanFromContext(ctx, cfg.receiveSpanName, opts...)
+ if msg.DeliveryAttempt != nil {
+ span.SetTag("delivery_attempt", *msg.DeliveryAttempt)
+ }
+ return ctx, func() { span.Finish() }
+ }
+}
diff --git a/contrib/cloud.google.com/go/pubsub.v1/option.go b/contrib/cloud.google.com/go/pubsub.v1/option.go
index 3e8d8b3c29..3820859a12 100644
--- a/contrib/cloud.google.com/go/pubsub.v1/option.go
+++ b/contrib/cloud.google.com/go/pubsub.v1/option.go
@@ -6,41 +6,17 @@
package pubsub
import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/cloud.google.com/go/pubsub.v1/internal/tracing"
)
-type config struct {
- serviceName string
- publishSpanName string
- receiveSpanName string
- measured bool
-}
+// Option is used to customize spans started by WrapReceiveHandler or Publish.
+type Option = tracing.Option
-func defaultConfig() *config {
- return &config{
- serviceName: namingschema.ServiceNameOverrideV0("", ""),
- publishSpanName: namingschema.OpName(namingschema.GCPPubSubOutbound),
- receiveSpanName: namingschema.OpName(namingschema.GCPPubSubInbound),
- measured: false,
- }
-}
-
-// A Option is used to customize spans started by WrapReceiveHandler or Publish.
-type Option func(cfg *config)
-
-// A ReceiveOption has been deprecated in favor of Option.
+// Deprecated: ReceiveOption has been deprecated in favor of Option.
type ReceiveOption = Option
// WithServiceName sets the service name tag for traces started by WrapReceiveHandler or Publish.
-func WithServiceName(serviceName string) Option {
- return func(cfg *config) {
- cfg.serviceName = serviceName
- }
-}
+var WithServiceName = tracing.WithServiceName
// WithMeasured sets the measured tag for traces started by WrapReceiveHandler or Publish.
-func WithMeasured() Option {
- return func(cfg *config) {
- cfg.measured = true
- }
-}
+var WithMeasured = tracing.WithMeasured
diff --git a/contrib/cloud.google.com/go/pubsub.v1/pubsub.go b/contrib/cloud.google.com/go/pubsub.v1/pubsub.go
index feb10a860e..1f01965090 100644
--- a/contrib/cloud.google.com/go/pubsub.v1/pubsub.go
+++ b/contrib/cloud.google.com/go/pubsub.v1/pubsub.go
@@ -8,23 +8,11 @@ package pubsub
import (
"context"
- "sync"
-
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
"cloud.google.com/go/pubsub"
-)
-
-const componentName = "cloud.google.com/go/pubsub.v1"
-func init() {
- telemetry.LoadIntegration(componentName)
- tracer.MarkIntegrationImported(componentName)
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/cloud.google.com/go/pubsub.v1/internal/tracing"
+)
// Publish publishes a message on the specified topic and returns a PublishResult.
// This function is functionally equivalent to t.Publish(ctx, msg), but it also starts a publish
@@ -33,58 +21,27 @@ func init() {
// It is required to call (*PublishResult).Get(ctx) on the value returned by Publish to complete
// the span.
func Publish(ctx context.Context, t *pubsub.Topic, msg *pubsub.Message, opts ...Option) *PublishResult {
- cfg := defaultConfig()
- for _, opt := range opts {
- opt(cfg)
- }
- spanOpts := []ddtrace.StartSpanOption{
- tracer.ResourceName(t.String()),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag("message_size", len(msg.Data)),
- tracer.Tag("ordering_key", msg.OrderingKey),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemGCPPubsub),
- }
- if cfg.serviceName != "" {
- spanOpts = append(spanOpts, tracer.ServiceName(cfg.serviceName))
- }
- if cfg.measured {
- spanOpts = append(spanOpts, tracer.Measured())
- }
- span, ctx := tracer.StartSpanFromContext(
- ctx,
- cfg.publishSpanName,
- spanOpts...,
- )
- if msg.Attributes == nil {
- msg.Attributes = make(map[string]string)
- }
- if err := tracer.Inject(span.Context(), tracer.TextMapCarrier(msg.Attributes)); err != nil {
- log.Debug("contrib/cloud.google.com/go/pubsub.v1/: failed injecting tracing attributes: %v", err)
- }
- span.SetTag("num_attributes", len(msg.Attributes))
+ traceMsg := newTraceMessage(msg)
+ ctx, closeSpan := tracing.TracePublish(ctx, t, traceMsg, opts...)
+ msg.Attributes = traceMsg.Attributes
+
return &PublishResult{
PublishResult: t.Publish(ctx, msg),
- span: span,
+ closeSpan: closeSpan,
}
}
// PublishResult wraps *pubsub.PublishResult
type PublishResult struct {
*pubsub.PublishResult
- once sync.Once
- span tracer.Span
+ closeSpan func(serverID string, err error)
}
// Get wraps (pubsub.PublishResult).Get(ctx). When this function returns the publish
// span created in Publish is completed.
func (r *PublishResult) Get(ctx context.Context) (string, error) {
serverID, err := r.PublishResult.Get(ctx)
- r.once.Do(func() {
- r.span.SetTag("server_id", serverID)
- r.span.Finish(tracer.WithError(err))
- })
+ r.closeSpan(serverID, err)
return serverID, err
}
@@ -92,37 +49,24 @@ func (r *PublishResult) Get(ctx context.Context) (string, error) {
// extracts any tracing metadata attached to the received message, and starts a
// receive span.
func WrapReceiveHandler(s *pubsub.Subscription, f func(context.Context, *pubsub.Message), opts ...Option) func(context.Context, *pubsub.Message) {
- cfg := defaultConfig()
- for _, opt := range opts {
- opt(cfg)
- }
- log.Debug("contrib/cloud.google.com/go/pubsub.v1: Wrapping Receive Handler: %#v", cfg)
+ traceFn := tracing.TraceReceiveFunc(s, opts...)
return func(ctx context.Context, msg *pubsub.Message) {
- parentSpanCtx, _ := tracer.Extract(tracer.TextMapCarrier(msg.Attributes))
- opts := []ddtrace.StartSpanOption{
- tracer.ResourceName(s.String()),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag("message_size", len(msg.Data)),
- tracer.Tag("num_attributes", len(msg.Attributes)),
- tracer.Tag("ordering_key", msg.OrderingKey),
- tracer.Tag("message_id", msg.ID),
- tracer.Tag("publish_time", msg.PublishTime.String()),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemGCPPubsub),
- tracer.ChildOf(parentSpanCtx),
- }
- if cfg.serviceName != "" {
- opts = append(opts, tracer.ServiceName(cfg.serviceName))
- }
- if cfg.measured {
- opts = append(opts, tracer.Measured())
- }
- span, ctx := tracer.StartSpanFromContext(ctx, cfg.receiveSpanName, opts...)
- if msg.DeliveryAttempt != nil {
- span.SetTag("delivery_attempt", *msg.DeliveryAttempt)
- }
- defer span.Finish()
+ ctx, closeSpan := traceFn(ctx, newTraceMessage(msg))
+ defer closeSpan()
f(ctx, msg)
}
}
+
+func newTraceMessage(msg *pubsub.Message) *tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &tracing.Message{
+ ID: msg.ID,
+ Data: msg.Data,
+ OrderingKey: msg.OrderingKey,
+ Attributes: msg.Attributes,
+ DeliveryAttempt: msg.DeliveryAttempt,
+ PublishTime: msg.PublishTime,
+ }
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/consumer.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/consumer.go
new file mode 100644
index 0000000000..90678c4ed2
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/consumer.go
@@ -0,0 +1,86 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func WrapConsumeEventsChannel[E any, TE Event](tr *KafkaTracer, in chan E, consumer Consumer, translateFn func(E) TE) chan E {
+ // in will be nil when consuming via the events channel is not enabled
+ if in == nil {
+ return nil
+ }
+
+ out := make(chan E, 1)
+ go func() {
+ defer close(out)
+ for evt := range in {
+ tEvt := translateFn(evt)
+ var next ddtrace.Span
+
+ // only trace messages
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ next = tr.StartConsumeSpan(msg)
+ tr.SetConsumeCheckpoint(msg)
+ } else if offset, ok := tEvt.KafkaOffsetsCommitted(); ok {
+ tr.TrackCommitOffsets(offset.GetOffsets(), offset.GetError())
+ tr.TrackHighWatermarkOffset(offset.GetOffsets(), consumer)
+ }
+
+ out <- evt
+
+ if tr.PrevSpan != nil {
+ tr.PrevSpan.Finish()
+ }
+ tr.PrevSpan = next
+ }
+ // finish any remaining span
+ if tr.PrevSpan != nil {
+ tr.PrevSpan.Finish()
+ tr.PrevSpan = nil
+ }
+ }()
+ return out
+}
+
+func (tr *KafkaTracer) StartConsumeSpan(msg Message) ddtrace.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.consumerServiceName),
+ tracer.ResourceName("Consume Topic " + msg.GetTopicPartition().GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageConsumer),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetTopicPartition().GetPartition()),
+ tracer.Tag("offset", msg.GetTopicPartition().GetOffset()),
+ tracer.Tag(ext.Component, ComponentName(tr.ckgoVersion)),
+ tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Measured(),
+ }
+ if tr.bootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.bootstrapServers))
+ }
+ if tr.tagFns != nil {
+ for key, tagFn := range tr.tagFns {
+ opts = append(opts, tracer.Tag(key, tagFn(msg)))
+ }
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ // kafka supports headers, so try to extract a span context
+ carrier := MessageCarrier{msg: msg}
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(tr.ctx, tr.consumerSpanName, opts...)
+ // reinject the span context so consumers can pick it up
+ tracer.Inject(span.Context(), carrier)
+ return span
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/dsm.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/dsm.go
new file mode 100644
index 0000000000..d27dc18ab8
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/dsm.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "context"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/datastreams"
+ "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func (tr *KafkaTracer) TrackCommitOffsets(offsets []TopicPartition, err error) {
+ if err != nil || tr.groupID == "" || !tr.dsmEnabled {
+ return
+ }
+ for _, tp := range offsets {
+ tracer.TrackKafkaCommitOffset(tr.groupID, tp.GetTopic(), tp.GetPartition(), tp.GetOffset())
+ }
+}
+
+func (tr *KafkaTracer) TrackHighWatermarkOffset(offsets []TopicPartition, consumer Consumer) {
+ if !tr.dsmEnabled {
+ return
+ }
+ for _, tp := range offsets {
+ if _, high, err := consumer.GetWatermarkOffsets(tp.GetTopic(), tp.GetPartition()); err == nil {
+ tracer.TrackKafkaHighWatermarkOffset("", tp.GetTopic(), tp.GetPartition(), high)
+ }
+ }
+}
+
+func (tr *KafkaTracer) TrackProduceOffsets(msg Message) {
+ err := msg.GetTopicPartition().GetError()
+ if err != nil || !tr.dsmEnabled || msg.GetTopicPartition().GetTopic() == "" {
+ return
+ }
+ tp := msg.GetTopicPartition()
+ tracer.TrackKafkaProduceOffset(tp.GetTopic(), tp.GetPartition(), tp.GetOffset())
+}
+
+func (tr *KafkaTracer) SetConsumeCheckpoint(msg Message) {
+ if !tr.dsmEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:in", "topic:" + msg.GetTopicPartition().GetTopic(), "type:kafka"}
+ if tr.groupID != "" {
+ edges = append(edges, "group:"+tr.groupID)
+ }
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func (tr *KafkaTracer) SetProduceCheckpoint(msg Message) {
+ if !tr.dsmEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:out", "topic:" + msg.GetTopicPartition().GetTopic(), "type:kafka"}
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getMsgSize(msg)},
+ edges...,
+ )
+ if !ok || tr.librdKafkaVersion < 0x000b0400 {
+ // headers are not supported in librdkafka < 0.11.4
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func getMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ return size + int64(len(msg.GetValue())+len(msg.GetKey()))
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer.go
new file mode 100644
index 0000000000..b293c698ea
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer.go
@@ -0,0 +1,142 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "context"
+ "math"
+ "net"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+)
+
+const defaultServiceName = "kafka"
+
+type KafkaTracer struct {
+ PrevSpan ddtrace.Span
+ ctx context.Context
+ consumerServiceName string
+ producerServiceName string
+ consumerSpanName string
+ producerSpanName string
+ analyticsRate float64
+ bootstrapServers string
+ groupID string
+ tagFns map[string]func(msg Message) interface{}
+ dsmEnabled bool
+ ckgoVersion CKGoVersion
+ librdKafkaVersion int
+}
+
+func (tr *KafkaTracer) DSMEnabled() bool {
+ return tr.dsmEnabled
+}
+
+// An Option customizes the KafkaTracer.
+type Option func(tr *KafkaTracer)
+
+func NewKafkaTracer(ckgoVersion CKGoVersion, librdKafkaVersion int, opts ...Option) *KafkaTracer {
+ tr := &KafkaTracer{
+ ctx: context.Background(),
+ // analyticsRate: globalconfig.AnalyticsRate(),
+ analyticsRate: math.NaN(),
+ ckgoVersion: ckgoVersion,
+ librdKafkaVersion: librdKafkaVersion,
+ }
+ tr.dsmEnabled = internal.BoolEnv("DD_DATA_STREAMS_ENABLED", false)
+ if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
+ tr.analyticsRate = 1.0
+ }
+
+ tr.consumerServiceName = namingschema.ServiceName(defaultServiceName)
+ tr.producerServiceName = namingschema.ServiceNameOverrideV0(defaultServiceName, defaultServiceName)
+ tr.consumerSpanName = namingschema.OpName(namingschema.KafkaInbound)
+ tr.producerSpanName = namingschema.OpName(namingschema.KafkaOutbound)
+
+ for _, opt := range opts {
+ opt(tr)
+ }
+ return tr
+}
+
+// WithContext sets the config context to ctx.
+// Deprecated: This is deprecated in favor of passing the context
+// via the message headers
+func WithContext(ctx context.Context) Option {
+ return func(tr *KafkaTracer) {
+ tr.ctx = ctx
+ }
+}
+
+// WithServiceName sets the config service name to serviceName.
+func WithServiceName(serviceName string) Option {
+ return func(tr *KafkaTracer) {
+ tr.consumerServiceName = serviceName
+ tr.producerServiceName = serviceName
+ }
+}
+
+// WithAnalytics enables Trace Analytics for all started spans.
+func WithAnalytics(on bool) Option {
+ return func(tr *KafkaTracer) {
+ if on {
+ tr.analyticsRate = 1.0
+ } else {
+ tr.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithAnalyticsRate sets the sampling rate for Trace Analytics events
+// correlated to started spans.
+func WithAnalyticsRate(rate float64) Option {
+ return func(tr *KafkaTracer) {
+ if rate >= 0.0 && rate <= 1.0 {
+ tr.analyticsRate = rate
+ } else {
+ tr.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithCustomTag will cause the given tagFn to be evaluated after executing
+// a query and attach the result to the span tagged by the key.
+func WithCustomTag(tag string, tagFn func(msg Message) interface{}) Option {
+ return func(tr *KafkaTracer) {
+ if tr.tagFns == nil {
+ tr.tagFns = make(map[string]func(msg Message) interface{})
+ }
+ tr.tagFns[tag] = tagFn
+ }
+}
+
+// WithConfig extracts the config information for the client to be tagged
+func WithConfig(cg ConfigMap) Option {
+ return func(tr *KafkaTracer) {
+ if groupID, err := cg.Get("group.id", ""); err == nil {
+ tr.groupID = groupID.(string)
+ }
+ if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
+ for _, addr := range strings.Split(bs.(string), ",") {
+ host, _, err := net.SplitHostPort(addr)
+ if err == nil {
+ tr.bootstrapServers = host
+ return
+ }
+ }
+ }
+ }
+}
+
+// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
+func WithDataStreams() Option {
+ return func(tr *KafkaTracer) {
+ tr.dsmEnabled = true
+ }
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer_test.go
similarity index 65%
rename from contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go
rename to contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer_test.go
index d990870fc5..f426458dbb 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/kafka_tracer_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
-package kafka
+package tracing
import (
"math"
@@ -16,29 +16,29 @@ import (
func TestDataStreamsActivation(t *testing.T) {
t.Run("default", func(t *testing.T) {
- cfg := newConfig()
- assert.False(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0)
+ assert.False(t, tr.DSMEnabled())
})
t.Run("withOption", func(t *testing.T) {
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0, WithDataStreams())
+ assert.True(t, tr.DSMEnabled())
})
t.Run("withEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
- cfg := newConfig()
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0)
+ assert.True(t, tr.DSMEnabled())
})
t.Run("optionOverridesEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
+ tr := NewKafkaTracer(0, 0, WithDataStreams())
+ assert.True(t, tr.DSMEnabled())
})
}
func TestAnalyticsSettings(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
- cfg := newConfig()
- assert.True(t, math.IsNaN(cfg.analyticsRate))
+ tr := NewKafkaTracer(0, 0)
+ assert.True(t, math.IsNaN(tr.analyticsRate))
})
t.Run("global", func(t *testing.T) {
@@ -47,13 +47,13 @@ func TestAnalyticsSettings(t *testing.T) {
defer globalconfig.SetAnalyticsRate(rate)
globalconfig.SetAnalyticsRate(0.4)
- cfg := newConfig()
- assert.Equal(t, 0.4, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0)
+ assert.Equal(t, 0.4, tr.analyticsRate)
})
t.Run("enabled", func(t *testing.T) {
- cfg := newConfig(WithAnalytics(true))
- assert.Equal(t, 1.0, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0, WithAnalytics(true))
+ assert.Equal(t, 1.0, tr.analyticsRate)
})
t.Run("override", func(t *testing.T) {
@@ -61,7 +61,7 @@ func TestAnalyticsSettings(t *testing.T) {
defer globalconfig.SetAnalyticsRate(rate)
globalconfig.SetAnalyticsRate(0.4)
- cfg := newConfig(WithAnalyticsRate(0.2))
- assert.Equal(t, 0.2, cfg.analyticsRate)
+ tr := NewKafkaTracer(0, 0, WithAnalyticsRate(0.2))
+ assert.Equal(t, 0.2, tr.analyticsRate)
})
}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/message_carrier.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/message_carrier.go
new file mode 100644
index 0000000000..5fbeecf9e0
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/message_carrier.go
@@ -0,0 +1,50 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+// A MessageCarrier implements TextMapReader/TextMapWriter for extracting/injecting traces on a Kafka message
+type MessageCarrier struct {
+ msg Message
+}
+
+var _ interface {
+ tracer.TextMapReader
+ tracer.TextMapWriter
+} = (*MessageCarrier)(nil)
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
+ for _, h := range c.msg.GetHeaders() {
+ err := handler(h.GetKey(), string(h.GetValue()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements TextMapWriter
+func (c MessageCarrier) Set(key, val string) {
+ headers := c.msg.GetHeaders()
+ // ensure uniqueness of keys
+ for i := 0; i < len(headers); i++ {
+ if headers[i].GetKey() == key {
+ headers = append(headers[:i], headers[i+1:]...)
+ i--
+ }
+ }
+ headers = append(headers, KafkaHeader{
+ Key: key,
+ Value: []byte(val),
+ })
+ c.msg.SetHeaders(headers)
+}
+
+func NewMessageCarrier(msg Message) MessageCarrier {
+ return MessageCarrier{msg: msg}
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/producer.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/producer.go
new file mode 100644
index 0000000000..25b043017f
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/producer.go
@@ -0,0 +1,103 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func WrapProduceChannel[M any, TM Message](tr *KafkaTracer, out chan M, translateFn func(M) TM) chan M {
+ if out == nil {
+ return out
+ }
+ in := make(chan M, 1)
+ go func() {
+ for msg := range in {
+ tMsg := translateFn(msg)
+ span := tr.StartProduceSpan(tMsg)
+ tr.SetProduceCheckpoint(tMsg)
+ out <- msg
+ span.Finish()
+ }
+ }()
+ return in
+}
+
+func WrapProduceEventsChannel[E any, TE Event](tr *KafkaTracer, in chan E, translateFn func(E) TE) chan E {
+ if in == nil {
+ return nil
+ }
+ out := make(chan E, 1)
+ go func() {
+ defer close(out)
+ for evt := range in {
+ tEvt := translateFn(evt)
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ tr.TrackProduceOffsets(msg)
+ }
+ out <- evt
+ }
+ }()
+ return out
+}
+
+func (tr *KafkaTracer) StartProduceSpan(msg Message) ddtrace.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.producerServiceName),
+ tracer.ResourceName("Produce Topic " + msg.GetTopicPartition().GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageProducer),
+ tracer.Tag(ext.Component, ComponentName(tr.ckgoVersion)),
+ tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetTopicPartition().GetPartition()),
+ }
+ if tr.bootstrapServers != "" {
+ opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, tr.bootstrapServers))
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ // if there's a span context in the headers, use that as the parent
+ carrier := NewMessageCarrier(msg)
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(tr.ctx, tr.producerSpanName, opts...)
+ // inject the span context so consumers can pick it up
+ tracer.Inject(span.Context(), carrier)
+ return span
+}
+
+func WrapDeliveryChannel[E any, TE Event](tr *KafkaTracer, deliveryChan chan E, span ddtrace.Span, translateFn func(E) TE) (chan E, chan error) {
+ // if the user has selected a delivery channel, we will wrap it and
+ // wait for the delivery event to finish the span
+ if deliveryChan == nil {
+ return nil, nil
+ }
+ wrapped := make(chan E)
+ errChan := make(chan error, 1)
+ go func() {
+ var err error
+ select {
+ case evt := <-wrapped:
+ tEvt := translateFn(evt)
+ if msg, ok := tEvt.KafkaMessage(); ok {
+ // delivery errors are returned via TopicPartition.Error
+ err = msg.GetTopicPartition().GetError()
+ tr.TrackProduceOffsets(msg)
+ }
+ deliveryChan <- evt
+ case e := <-errChan:
+ err = e
+ }
+ span.Finish(tracer.WithError(err))
+ }()
+ return wrapped, errChan
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/tracing.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/tracing.go
new file mode 100644
index 0000000000..4e5f78a7d6
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/tracing.go
@@ -0,0 +1,35 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+type CKGoVersion int32
+
+const (
+ CKGoVersion1 CKGoVersion = 1
+ CKGoVersion2 CKGoVersion = 2
+)
+
+func ComponentName(v CKGoVersion) string {
+ switch v {
+ case CKGoVersion1:
+ return "confluentinc/confluent-kafka-go/kafka"
+ case CKGoVersion2:
+ return "confluentinc/confluent-kafka-go/kafka.v2"
+ default:
+ return ""
+ }
+}
+
+func IntegrationName(v CKGoVersion) string {
+ switch v {
+ case CKGoVersion1:
+ return "github.com/confluentinc/confluent-kafka-go"
+ case CKGoVersion2:
+ return "github.com/confluentinc/confluent-kafka-go/v2"
+ default:
+ return ""
+ }
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/internal/tracing/types.go b/contrib/confluentinc/confluent-kafka-go/internal/tracing/types.go
new file mode 100644
index 0000000000..537c111341
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/internal/tracing/types.go
@@ -0,0 +1,66 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+type Message interface {
+ GetValue() []byte
+ GetKey() []byte
+ GetHeaders() []Header
+ SetHeaders([]Header)
+ GetTopicPartition() TopicPartition
+ Unwrap() any
+}
+
+type Header interface {
+ GetKey() string
+ GetValue() []byte
+}
+
+type KafkaHeader struct {
+ Key string
+ Value []byte
+}
+
+func (h KafkaHeader) GetKey() string {
+ return h.Key
+}
+
+func (h KafkaHeader) GetValue() []byte {
+ return h.Value
+}
+
+type OffsetsCommitted interface {
+ GetError() error
+ GetOffsets() []TopicPartition
+}
+
+type TopicPartition interface {
+ GetTopic() string
+ GetPartition() int32
+ GetOffset() int64
+ GetError() error
+}
+
+type Event interface {
+ KafkaMessage() (Message, bool)
+ KafkaOffsetsCommitted() (OffsetsCommitted, bool)
+}
+
+type Consumer interface {
+ GetWatermarkOffsets(topic string, partition int32) (low int64, high int64, err error)
+}
+
+type ConfigMap interface {
+ Get(key string, defval any) (any, error)
+}
+
+type SpanStore struct {
+ Prev ddtrace.Span
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
index d940513ff8..c4061543ed 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/headers.go
@@ -6,48 +6,15 @@
package kafka
import (
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
-
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
-)
-
-// A MessageCarrier injects and extracts traces from a sarama.ProducerMessage.
-type MessageCarrier struct {
- msg *kafka.Message
-}
-var _ interface {
- tracer.TextMapReader
- tracer.TextMapWriter
-} = (*MessageCarrier)(nil)
-
-// ForeachKey iterates over every header.
-func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
- for _, h := range c.msg.Headers {
- err := handler(string(h.Key), string(h.Value))
- if err != nil {
- return err
- }
- }
- return nil
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
-// Set sets a header.
-func (c MessageCarrier) Set(key, val string) {
- // ensure uniqueness of keys
- for i := 0; i < len(c.msg.Headers); i++ {
- if string(c.msg.Headers[i].Key) == key {
- c.msg.Headers = append(c.msg.Headers[:i], c.msg.Headers[i+1:]...)
- i--
- }
- }
- c.msg.Headers = append(c.msg.Headers, kafka.Header{
- Key: key,
- Value: []byte(val),
- })
-}
+// A MessageCarrier injects and extracts traces from a kafka.Message.
+type MessageCarrier = tracing.MessageCarrier
// NewMessageCarrier creates a new MessageCarrier.
func NewMessageCarrier(msg *kafka.Message) MessageCarrier {
- return MessageCarrier{msg}
+ return tracing.NewMessageCarrier(wrapMessage(msg))
}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
index 463ba2391c..24bbe5a3d8 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka.go
@@ -4,34 +4,32 @@
// Copyright 2016 Datadog, Inc.
// Package kafka provides functions to trace the confluentinc/confluent-kafka-go package (https://github.com/confluentinc/confluent-kafka-go).
-package kafka // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/kafka"
+package kafka // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/kafka.v2"
import (
- "context"
- "math"
"time"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
-
- "github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
const (
- // make sure these 3 are updated to V2 for the V2 version.
- componentName = "confluentinc/confluent-kafka-go/kafka.v2"
- packageName = "contrib/confluentinc/confluent-kafka-go/kafka.v2"
- integrationName = "github.com/confluentinc/confluent-kafka-go/v2"
+ ckgoVersion = tracing.CKGoVersion2
+ logPrefix = "contrib/confluentinc/confluent-kafka-go/kafka.v2"
)
func init() {
- telemetry.LoadIntegration(componentName)
- tracer.MarkIntegrationImported(integrationName)
+ telemetry.LoadIntegration(tracing.ComponentName(ckgoVersion))
+ tracer.MarkIntegrationImported(tracing.IntegrationName(ckgoVersion))
+}
+
+func newKafkaTracer(opts ...Option) *tracing.KafkaTracer {
+ v, _ := kafka.LibraryVersion()
+ return tracing.NewKafkaTracer(tracing.CKGoVersion2, v, opts...)
}
// NewConsumer calls kafka.NewConsumer and wraps the resulting Consumer.
@@ -57,93 +55,21 @@ func NewProducer(conf *kafka.ConfigMap, opts ...Option) (*Producer, error) {
// A Consumer wraps a kafka.Consumer.
type Consumer struct {
*kafka.Consumer
- cfg *config
+ tracer *tracing.KafkaTracer
events chan kafka.Event
- prev ddtrace.Span
}
// WrapConsumer wraps a kafka.Consumer so that any consumed events are traced.
func WrapConsumer(c *kafka.Consumer, opts ...Option) *Consumer {
wrapped := &Consumer{
Consumer: c,
- cfg: newConfig(opts...),
+ tracer: newKafkaTracer(opts...),
}
- log.Debug("%s: Wrapping Consumer: %#v", packageName, wrapped.cfg)
- wrapped.events = wrapped.traceEventsChannel(c.Events())
+ log.Debug("%s: Wrapping Consumer: %#v", logPrefix, wrapped.tracer)
+ wrapped.events = tracing.WrapConsumeEventsChannel(wrapped.tracer, c.Events(), c, wrapEvent)
return wrapped
}
-func (c *Consumer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- // in will be nil when consuming via the events channel is not enabled
- if in == nil {
- return nil
- }
-
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- var next ddtrace.Span
-
- // only trace messages
- if msg, ok := evt.(*kafka.Message); ok {
- next = c.startSpan(msg)
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- } else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
- }
-
- out <- evt
-
- if c.prev != nil {
- c.prev.Finish()
- }
- c.prev = next
- }
- // finish any remaining span
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
- }
- }()
- return out
-}
-
-func (c *Consumer) startSpan(msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(c.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- tracer.Tag("offset", msg.TopicPartition.Offset),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Measured(),
- }
- if c.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, c.cfg.bootstrapServers))
- }
- if c.cfg.tagFns != nil {
- for key, tagFn := range c.cfg.tagFns {
- opts = append(opts, tracer.Tag(key, tagFn(msg)))
- }
- }
- if !math.IsNaN(c.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, c.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(c.cfg.ctx, c.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Consumer.Close and if polling is enabled, finishes
// any remaining span.
func (c *Consumer) Close() error {
@@ -151,178 +77,111 @@ func (c *Consumer) Close() error {
// we only close the previous span if consuming via the events channel is
// not enabled, because otherwise there would be a data race from the
// consuming goroutine.
- if c.events == nil && c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.events == nil && c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
return err
}
-// Events returns the kafka Events channel (if enabled). Message events will be
+// Events returns the kafka Events channel (if enabled). msg events will be
// traced.
func (c *Consumer) Events() chan kafka.Event {
return c.events
}
-// Poll polls the consumer for messages or events. Message will be
+// Poll polls the consumer for messages or events. msg will be
// traced.
func (c *Consumer) Poll(timeoutMS int) (event kafka.Event) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
evt := c.Consumer.Poll(timeoutMS)
if msg, ok := evt.(*kafka.Message); ok {
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
} else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
+ tOffsets := wrapTopicPartitions(offset.Offsets)
+ c.tracer.TrackCommitOffsets(tOffsets, offset.Error)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
}
return evt
}
-func (c *Consumer) trackHighWatermark(dataStreamsEnabled bool, offsets []kafka.TopicPartition) {
- if !dataStreamsEnabled {
- return
- }
- for _, tp := range offsets {
- if _, high, err := c.Consumer.GetWatermarkOffsets(*tp.Topic, tp.Partition); err == nil {
- tracer.TrackKafkaHighWatermarkOffset("", *tp.Topic, tp.Partition, high)
- }
- }
-}
-
-// ReadMessage polls the consumer for a message. Message will be traced.
+// ReadMessage polls the consumer for a message. msg will be traced.
func (c *Consumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
msg, err := c.Consumer.ReadMessage(timeout)
if err != nil {
return nil, err
}
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
return msg, nil
}
// Commit commits current offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) Commit() ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.Commit()
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitMessage commits a message and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitMessage(msg *kafka.Message) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitMessage(msg)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitOffsets commits provided offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitOffsets(offsets []kafka.TopicPartition) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitOffsets(offsets)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
-func commitOffsets(dataStreamsEnabled bool, groupID string, tps []kafka.TopicPartition, err error) {
- if err != nil || groupID == "" || !dataStreamsEnabled {
- return
- }
- for _, tp := range tps {
- tracer.TrackKafkaCommitOffset(groupID, *tp.Topic, tp.Partition, int64(tp.Offset))
- }
-}
-
-func trackProduceOffsets(dataStreamsEnabled bool, msg *kafka.Message, err error) {
- if err != nil || !dataStreamsEnabled || msg.TopicPartition.Topic == nil {
- return
- }
- tracer.TrackKafkaProduceOffset(*msg.TopicPartition.Topic, msg.TopicPartition.Partition, int64(msg.TopicPartition.Offset))
-}
-
// A Producer wraps a kafka.Producer.
type Producer struct {
*kafka.Producer
- cfg *config
+ tracer *tracing.KafkaTracer
produceChannel chan *kafka.Message
events chan kafka.Event
- libraryVersion int
}
// WrapProducer wraps a kafka.Producer so requests are traced.
func WrapProducer(p *kafka.Producer, opts ...Option) *Producer {
- version, _ := kafka.LibraryVersion()
wrapped := &Producer{
- Producer: p,
- cfg: newConfig(opts...),
- events: p.Events(),
- libraryVersion: version,
+ Producer: p,
+ tracer: newKafkaTracer(opts...),
+ events: p.Events(),
}
- log.Debug("%s: Wrapping Producer: %#v", packageName, wrapped.cfg)
- wrapped.produceChannel = wrapped.traceProduceChannel(p.ProduceChannel())
- if wrapped.cfg.dataStreamsEnabled {
- wrapped.events = wrapped.traceEventsChannel(p.Events())
+ log.Debug("%s: Wrapping Producer: %#v", logPrefix, wrapped.tracer)
+ wrapped.produceChannel = tracing.WrapProduceChannel(wrapped.tracer, p.ProduceChannel(), wrapMessage)
+ if wrapped.tracer.DSMEnabled() {
+ wrapped.events = tracing.WrapProduceEventsChannel(wrapped.tracer, p.Events(), wrapEvent)
}
return wrapped
}
-// Events returns the kafka Events channel (if enabled). Message events will be monitored
+// Events returns the kafka Events channel (if enabled). msg events will be monitored
// with data streams monitoring (if enabled)
func (p *Producer) Events() chan kafka.Event {
return p.events
}
-func (p *Producer) traceProduceChannel(out chan *kafka.Message) chan *kafka.Message {
- if out == nil {
- return out
- }
- in := make(chan *kafka.Message, 1)
- go func() {
- for msg := range in {
- span := p.startSpan(msg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
- out <- msg
- span.Finish()
- }
- }()
- return in
-}
-
-func (p *Producer) startSpan(msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(p.cfg.producerServiceName),
- tracer.ResourceName("Produce Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- }
- if p.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, p.cfg.bootstrapServers))
- }
- if !math.IsNaN(p.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, p.cfg.analyticsRate))
- }
- //if there's a span context in the headers, use that as the parent
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(p.cfg.ctx, p.cfg.producerSpanName, opts...)
- // inject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Producer.Close and also closes the internal
// wrapping producer channel.
func (p *Producer) Close() {
@@ -332,33 +191,23 @@ func (p *Producer) Close() {
// Produce calls the underlying Producer.Produce and traces the request.
func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) error {
- span := p.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ span := p.tracer.StartProduceSpan(tMsg)
- // if the user has selected a delivery channel, we will wrap it and
- // wait for the delivery event to finish the span
- if deliveryChan != nil {
- oldDeliveryChan := deliveryChan
- deliveryChan = make(chan kafka.Event)
- go func() {
- var err error
- evt := <-deliveryChan
- if msg, ok := evt.(*kafka.Message); ok {
- // delivery errors are returned via TopicPartition.Error
- err = msg.TopicPartition.Error
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, err)
- }
- span.Finish(tracer.WithError(err))
- oldDeliveryChan <- evt
- }()
- }
+ var errChan chan error
+ deliveryChan, errChan = tracing.WrapDeliveryChannel(p.tracer, deliveryChan, span, wrapEvent)
+
+ p.tracer.SetProduceCheckpoint(tMsg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
err := p.Producer.Produce(msg, deliveryChan)
- // with no delivery channel or enqueue error, finish immediately
- if err != nil || deliveryChan == nil {
- span.Finish(tracer.WithError(err))
+ if err != nil {
+ if errChan != nil {
+ errChan <- err
+ } else {
+ // with no delivery channel or enqueue error, finish immediately
+ span.Finish(tracer.WithError(err))
+ }
}
-
return err
}
@@ -367,57 +216,3 @@ func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) er
func (p *Producer) ProduceChannel() chan *kafka.Message {
return p.produceChannel
}
-
-func (p *Producer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- if in == nil {
- return nil
- }
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- if msg, ok := evt.(*kafka.Message); ok {
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, msg.TopicPartition.Error)
- }
- out <- evt
- }
- }()
- return out
-}
-
-func setConsumeCheckpoint(dataStreamsEnabled bool, groupID string, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func setProduceCheckpoint(dataStreamsEnabled bool, version int, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:out", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok || version < 0x000b0400 {
- // headers not supported before librdkafka >=0.11.4
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
index 8efa05fc58..e57288598b 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/kafka_test.go
@@ -23,6 +23,7 @@ import (
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
)
var (
@@ -30,82 +31,6 @@ var (
testTopic = "gotest"
)
-type consumerActionFn func(c *Consumer) (*kafka.Message, error)
-
-func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]mocktracer.Span, *kafka.Message) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("to enable integration test, set the INTEGRATION environment variable")
- }
- mt := mocktracer.Start()
- defer mt.Stop()
-
- // first write a message to the topic
- p, err := NewProducer(&kafka.ConfigMap{
- "bootstrap.servers": "127.0.0.1:9092",
- "go.delivery.reports": true,
- }, producerOpts...)
- require.NoError(t, err)
-
- delivery := make(chan kafka.Event, 1)
- err = p.Produce(&kafka.Message{
- TopicPartition: kafka.TopicPartition{
- Topic: &testTopic,
- Partition: 0,
- },
- Key: []byte("key2"),
- Value: []byte("value2"),
- }, delivery)
- require.NoError(t, err)
-
- msg1, _ := (<-delivery).(*kafka.Message)
- p.Close()
-
- // next attempt to consume the message
- c, err := NewConsumer(&kafka.ConfigMap{
- "group.id": testGroupID,
- "bootstrap.servers": "127.0.0.1:9092",
- "fetch.wait.max.ms": 500,
- "socket.timeout.ms": 1500,
- "session.timeout.ms": 1500,
- "enable.auto.offset.store": false,
- }, consumerOpts...)
- require.NoError(t, err)
-
- err = c.Assign([]kafka.TopicPartition{
- {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
- })
- require.NoError(t, err)
-
- msg2, err := consumerAction(c)
- require.NoError(t, err)
- _, err = c.CommitMessage(msg2)
- require.NoError(t, err)
- assert.Equal(t, msg1.String(), msg2.String())
- err = c.Close()
- require.NoError(t, err)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
- // they should be linked via headers
- assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
-
- if c.cfg.dataStreamsEnabled {
- backlogs := mt.SentDSMBacklogs()
- toMap := func(b []internaldsm.Backlog) map[string]struct{} {
- m := make(map[string]struct{})
- for _, b := range backlogs {
- m[strings.Join(b.Tags, "")] = struct{}{}
- }
- return m
- }
- backlogsMap := toMap(backlogs)
- require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
- }
- return spans, msg2
-}
-
func TestConsumerChannel(t *testing.T) {
// we can test consuming via the Events channel by artifically sending
// messages. Testing .Poll is done via an integration test.
@@ -164,8 +89,8 @@ func TestConsumerChannel(t *testing.T) {
assert.Equal(t, "queue", s.Tag(ext.SpanType))
assert.Equal(t, int32(1), s.Tag(ext.MessagingKafkaPartition))
assert.Equal(t, 0.3, s.Tag(ext.EventSampleRate))
- assert.Equal(t, kafka.Offset(i+1), s.Tag("offset"))
- assert.Equal(t, componentName, s.Tag(ext.Component))
+ assert.EqualValues(t, kafka.Offset(i+1), s.Tag("offset"))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka.v2", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s.Tag(ext.MessagingSystem))
}
@@ -179,30 +104,6 @@ func TestConsumerChannel(t *testing.T) {
}
}
-/*
-to run the integration test locally:
-
- docker network create confluent
-
- docker run --rm \
- --name zookeeper \
- --network confluent \
- -p 2181:2181 \
- -e ZOOKEEPER_CLIENT_PORT=2181 \
- confluentinc/cp-zookeeper:5.0.0
-
- docker run --rm \
- --name kafka \
- --network confluent \
- -p 9092:9092 \
- -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
- -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
- -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
- -e KAFKA_CREATE_TOPICS=gotest:1:1 \
- -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
- confluentinc/cp-kafka:5.0.0
-*/
-
func TestConsumerFunctional(t *testing.T) {
for _, tt := range []struct {
name string
@@ -236,7 +137,7 @@ func TestConsumerFunctional(t *testing.T) {
assert.Equal(t, 0.1, s0.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s0.Tag(ext.SpanType))
assert.Equal(t, int32(0), s0.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, componentName, s0.Tag(ext.Component))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka.v2", s0.Tag(ext.Component))
assert.Equal(t, ext.SpanKindProducer, s0.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "127.0.0.1", s0.Tag(ext.KafkaBootstrapServers))
@@ -248,7 +149,7 @@ func TestConsumerFunctional(t *testing.T) {
assert.Equal(t, nil, s1.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s1.Tag(ext.SpanType))
assert.Equal(t, int32(0), s1.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, componentName, s1.Tag(ext.Component))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka.v2", s1.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s1.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "127.0.0.1", s1.Tag(ext.KafkaBootstrapServers))
@@ -344,7 +245,7 @@ func TestCustomTags(t *testing.T) {
"socket.timeout.ms": 10,
"session.timeout.ms": 10,
"enable.auto.offset.store": false,
- }, WithCustomTag("foo", func(msg *kafka.Message) interface{} {
+ }, WithCustomTag("foo", func(_ *kafka.Message) interface{} {
return "bar"
}), WithCustomTag("key", func(msg *kafka.Message) interface{} {
return msg.Key
@@ -394,3 +295,125 @@ func TestNamingSchema(t *testing.T) {
}
namingschematest.NewKafkaTest(genSpans)(t)
}
+
+// Test we don't leak goroutines and properly close the span when Produce returns an error.
+func TestProduceError(t *testing.T) {
+ defer func() {
+ err := goleak.Find()
+ if err != nil {
+ // if a goroutine is leaking, ensure it is not coming from this package
+ if strings.Contains(err.Error(), "contrib/confluentinc/confluent-kafka-go") {
+ assert.NoError(t, err, "found leaked goroutine(s) from this package")
+ }
+ }
+ }()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ })
+ require.NoError(t, err)
+ defer p.Close()
+
+ // this empty message should cause an error in the Produce call.
+ topic := ""
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &topic,
+ },
+ }
+ deliveryChan := make(chan kafka.Event, 1)
+ err = p.Produce(msg, deliveryChan)
+ require.Error(t, err)
+ require.EqualError(t, err, "Local: Invalid argument or configuration")
+
+ select {
+ case <-deliveryChan:
+ assert.Fail(t, "there should be no events in the deliveryChan")
+ case <-time.After(1 * time.Second):
+ // assume there is no event
+ }
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+}
+
+type consumerActionFn func(c *Consumer) (*kafka.Message, error)
+
+func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]mocktracer.Span, *kafka.Message) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ }, producerOpts...)
+ require.NoError(t, err)
+
+ delivery := make(chan kafka.Event, 1)
+ err = p.Produce(&kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 0,
+ },
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ }, delivery)
+ require.NoError(t, err)
+
+ msg1, _ := (<-delivery).(*kafka.Message)
+ p.Close()
+
+ // next attempt to consume the message
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "group.id": testGroupID,
+ "bootstrap.servers": "127.0.0.1:9092",
+ "fetch.wait.max.ms": 500,
+ "socket.timeout.ms": 1500,
+ "session.timeout.ms": 1500,
+ "enable.auto.offset.store": false,
+ }, consumerOpts...)
+ require.NoError(t, err)
+
+ err = c.Assign([]kafka.TopicPartition{
+ {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
+ })
+ require.NoError(t, err)
+
+ msg2, err := consumerAction(c)
+ require.NoError(t, err)
+ _, err = c.CommitMessage(msg2)
+ require.NoError(t, err)
+ assert.Equal(t, msg1.String(), msg2.String())
+ err = c.Close()
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+ // they should be linked via headers
+ assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+
+ if c.tracer.DSMEnabled() {
+ backlogs := mt.SentDSMBacklogs()
+ toMap := func(_ []internaldsm.Backlog) map[string]struct{} {
+ m := make(map[string]struct{})
+ for _, b := range backlogs {
+ m[strings.Join(b.Tags, "")] = struct{}{}
+ }
+ return m
+ }
+ backlogsMap := toMap(backlogs)
+ require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
+ }
+ return spans, msg2
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
index 8c7b8fed05..4061e2b6fa 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/option.go
@@ -6,129 +6,45 @@
package kafka
import (
- "context"
- "math"
- "net"
- "strings"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
-
"github.com/confluentinc/confluent-kafka-go/v2/kafka"
-)
-
-const defaultServiceName = "kafka"
-type config struct {
- ctx context.Context
- consumerServiceName string
- producerServiceName string
- consumerSpanName string
- producerSpanName string
- analyticsRate float64
- bootstrapServers string
- groupID string
- tagFns map[string]func(msg *kafka.Message) interface{}
- dataStreamsEnabled bool
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
// An Option customizes the config.
-type Option func(cfg *config)
-
-func newConfig(opts ...Option) *config {
- cfg := &config{
- ctx: context.Background(),
- // analyticsRate: globalconfig.AnalyticsRate(),
- analyticsRate: math.NaN(),
- }
- cfg.dataStreamsEnabled = internal.BoolEnv("DD_DATA_STREAMS_ENABLED", false)
- if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
- cfg.analyticsRate = 1.0
- }
-
- cfg.consumerServiceName = namingschema.ServiceName(defaultServiceName)
- cfg.producerServiceName = namingschema.ServiceNameOverrideV0(defaultServiceName, defaultServiceName)
- cfg.consumerSpanName = namingschema.OpName(namingschema.KafkaInbound)
- cfg.producerSpanName = namingschema.OpName(namingschema.KafkaOutbound)
-
- for _, opt := range opts {
- opt(cfg)
- }
- return cfg
-}
+type Option = tracing.Option
// WithContext sets the config context to ctx.
// Deprecated: This is deprecated in favor of passing the context
// via the message headers
-func WithContext(ctx context.Context) Option {
- return func(cfg *config) {
- cfg.ctx = ctx
- }
-}
+var WithContext = tracing.WithContext
// WithServiceName sets the config service name to serviceName.
-func WithServiceName(serviceName string) Option {
- return func(cfg *config) {
- cfg.consumerServiceName = serviceName
- cfg.producerServiceName = serviceName
- }
-}
+var WithServiceName = tracing.WithServiceName
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) Option {
- return func(cfg *config) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) Option {
- return func(cfg *config) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithCustomTag will cause the given tagFn to be evaluated after executing
// a query and attach the result to the span tagged by the key.
func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) Option {
- return func(cfg *config) {
- if cfg.tagFns == nil {
- cfg.tagFns = make(map[string]func(msg *kafka.Message) interface{})
+ wrapped := func(msg tracing.Message) interface{} {
+ if m, ok := msg.Unwrap().(*kafka.Message); ok {
+ return tagFn(m)
}
- cfg.tagFns[tag] = tagFn
+ return nil
}
+ return tracing.WithCustomTag(tag, wrapped)
}
// WithConfig extracts the config information for the client to be tagged
-func WithConfig(cg *kafka.ConfigMap) Option {
- return func(cfg *config) {
- if groupID, err := cg.Get("group.id", ""); err == nil {
- cfg.groupID = groupID.(string)
- }
- if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
- for _, addr := range strings.Split(bs.(string), ",") {
- host, _, err := net.SplitHostPort(addr)
- if err == nil {
- cfg.bootstrapServers = host
- return
- }
- }
- }
- }
+func WithConfig(cm *kafka.ConfigMap) Option {
+ return tracing.WithConfig(wrapConfigMap(cm))
}
// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
-func WithDataStreams() Option {
- return func(cfg *config) {
- cfg.dataStreamsEnabled = true
- }
-}
+var WithDataStreams = tracing.WithDataStreams
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go b/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go
new file mode 100644
index 0000000000..02f96217ec
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/kafka.v2/tracing.go
@@ -0,0 +1,163 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) Unwrap() any {
+ return w.Message
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Message.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Message.Key
+}
+
+func (w *wMessage) GetHeaders() []tracing.Header {
+ hs := make([]tracing.Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []tracing.Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopicPartition() tracing.TopicPartition {
+ return wrapTopicPartition(w.Message.TopicPartition)
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) tracing.Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Header.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Header.Value
+}
+
+type wTopicPartition struct {
+ kafka.TopicPartition
+}
+
+func wrapTopicPartition(tp kafka.TopicPartition) tracing.TopicPartition {
+ return wTopicPartition{tp}
+}
+
+func wrapTopicPartitions(tps []kafka.TopicPartition) []tracing.TopicPartition {
+ wtps := make([]tracing.TopicPartition, 0, len(tps))
+ for _, tp := range tps {
+ wtps = append(wtps, wTopicPartition{tp})
+ }
+ return wtps
+}
+
+func (w wTopicPartition) GetTopic() string {
+ if w.Topic == nil {
+ return ""
+ }
+ return *w.Topic
+}
+
+func (w wTopicPartition) GetPartition() int32 {
+ return w.Partition
+}
+
+func (w wTopicPartition) GetOffset() int64 {
+ return int64(w.Offset)
+}
+
+func (w wTopicPartition) GetError() error {
+ return w.Error
+}
+
+type wEvent struct {
+ kafka.Event
+}
+
+func wrapEvent(event kafka.Event) tracing.Event {
+ return wEvent{event}
+}
+
+func (w wEvent) KafkaMessage() (tracing.Message, bool) {
+ if m, ok := w.Event.(*kafka.Message); ok {
+ return wrapMessage(m), true
+ }
+ return nil, false
+}
+
+func (w wEvent) KafkaOffsetsCommitted() (tracing.OffsetsCommitted, bool) {
+ if oc, ok := w.Event.(kafka.OffsetsCommitted); ok {
+ return wrapOffsetsCommitted(oc), true
+ }
+ return nil, false
+}
+
+type wOffsetsCommitted struct {
+ kafka.OffsetsCommitted
+}
+
+func wrapOffsetsCommitted(oc kafka.OffsetsCommitted) tracing.OffsetsCommitted {
+ return wOffsetsCommitted{oc}
+}
+
+func (w wOffsetsCommitted) GetError() error {
+ return w.Error
+}
+
+func (w wOffsetsCommitted) GetOffsets() []tracing.TopicPartition {
+ ttps := make([]tracing.TopicPartition, 0, len(w.Offsets))
+ for _, tp := range w.Offsets {
+ ttps = append(ttps, wrapTopicPartition(tp))
+ }
+ return ttps
+}
+
+type wConfigMap struct {
+ cfg *kafka.ConfigMap
+}
+
+func wrapConfigMap(cm *kafka.ConfigMap) tracing.ConfigMap {
+ return &wConfigMap{cm}
+}
+
+func (w *wConfigMap) Get(key string, defVal any) (any, error) {
+ return w.cfg.Get(key, defVal)
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/headers.go b/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
index 3f66e98be5..e29f88f30c 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/headers.go
@@ -6,48 +6,15 @@
package kafka
import (
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
-
"github.com/confluentinc/confluent-kafka-go/kafka"
-)
-
-// A MessageCarrier injects and extracts traces from a sarama.ProducerMessage.
-type MessageCarrier struct {
- msg *kafka.Message
-}
-var _ interface {
- tracer.TextMapReader
- tracer.TextMapWriter
-} = (*MessageCarrier)(nil)
-
-// ForeachKey iterates over every header.
-func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
- for _, h := range c.msg.Headers {
- err := handler(string(h.Key), string(h.Value))
- if err != nil {
- return err
- }
- }
- return nil
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
-// Set sets a header.
-func (c MessageCarrier) Set(key, val string) {
- // ensure uniqueness of keys
- for i := 0; i < len(c.msg.Headers); i++ {
- if string(c.msg.Headers[i].Key) == key {
- c.msg.Headers = append(c.msg.Headers[:i], c.msg.Headers[i+1:]...)
- i--
- }
- }
- c.msg.Headers = append(c.msg.Headers, kafka.Header{
- Key: key,
- Value: []byte(val),
- })
-}
+// A MessageCarrier injects and extracts traces from a kafka.Message.
+type MessageCarrier = tracing.MessageCarrier
// NewMessageCarrier creates a new MessageCarrier.
func NewMessageCarrier(msg *kafka.Message) MessageCarrier {
- return MessageCarrier{msg}
+ return tracing.NewMessageCarrier(wrapMessage(msg))
}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go b/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
index d78d4c8860..b4f5485c37 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/kafka.go
@@ -7,31 +7,29 @@
package kafka // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/kafka"
import (
- "context"
- "math"
"time"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
-
- "github.com/confluentinc/confluent-kafka-go/kafka"
)
const (
- // make sure these 3 are updated to V2 for the V2 version.
- componentName = "confluentinc/confluent-kafka-go/kafka"
- packageName = "contrib/confluentinc/confluent-kafka-go/kafka"
- integrationName = "github.com/confluentinc/confluent-kafka-go"
+ ckgoVersion = tracing.CKGoVersion1
+ logPrefix = "contrib/confluentinc/confluent-kafka-go/kafka"
)
func init() {
- telemetry.LoadIntegration(componentName)
- tracer.MarkIntegrationImported(integrationName)
+ telemetry.LoadIntegration(tracing.ComponentName(ckgoVersion))
+ tracer.MarkIntegrationImported(tracing.IntegrationName(ckgoVersion))
+}
+
+func newKafkaTracer(opts ...Option) *tracing.KafkaTracer {
+ v, _ := kafka.LibraryVersion()
+ return tracing.NewKafkaTracer(tracing.CKGoVersion1, v, opts...)
}
// NewConsumer calls kafka.NewConsumer and wraps the resulting Consumer.
@@ -57,93 +55,21 @@ func NewProducer(conf *kafka.ConfigMap, opts ...Option) (*Producer, error) {
// A Consumer wraps a kafka.Consumer.
type Consumer struct {
*kafka.Consumer
- cfg *config
+ tracer *tracing.KafkaTracer
events chan kafka.Event
- prev ddtrace.Span
}
// WrapConsumer wraps a kafka.Consumer so that any consumed events are traced.
func WrapConsumer(c *kafka.Consumer, opts ...Option) *Consumer {
wrapped := &Consumer{
Consumer: c,
- cfg: newConfig(opts...),
+ tracer: newKafkaTracer(opts...),
}
- log.Debug("%s: Wrapping Consumer: %#v", packageName, wrapped.cfg)
- wrapped.events = wrapped.traceEventsChannel(c.Events())
+ log.Debug("%s: Wrapping Consumer: %#v", logPrefix, wrapped.tracer)
+ wrapped.events = tracing.WrapConsumeEventsChannel(wrapped.tracer, c.Events(), c, wrapEvent)
return wrapped
}
-func (c *Consumer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- // in will be nil when consuming via the events channel is not enabled
- if in == nil {
- return nil
- }
-
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- var next ddtrace.Span
-
- // only trace messages
- if msg, ok := evt.(*kafka.Message); ok {
- next = c.startSpan(msg)
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- } else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
- }
-
- out <- evt
-
- if c.prev != nil {
- c.prev.Finish()
- }
- c.prev = next
- }
- // finish any remaining span
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
- }
- }()
- return out
-}
-
-func (c *Consumer) startSpan(msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(c.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- tracer.Tag("offset", msg.TopicPartition.Offset),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Measured(),
- }
- if c.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, c.cfg.bootstrapServers))
- }
- if c.cfg.tagFns != nil {
- for key, tagFn := range c.cfg.tagFns {
- opts = append(opts, tracer.Tag(key, tagFn(msg)))
- }
- }
- if !math.IsNaN(c.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, c.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(c.cfg.ctx, c.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Consumer.Close and if polling is enabled, finishes
// any remaining span.
func (c *Consumer) Close() error {
@@ -151,178 +77,111 @@ func (c *Consumer) Close() error {
// we only close the previous span if consuming via the events channel is
// not enabled, because otherwise there would be a data race from the
// consuming goroutine.
- if c.events == nil && c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.events == nil && c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
return err
}
-// Events returns the kafka Events channel (if enabled). Message events will be
+// Events returns the kafka Events channel (if enabled). Message events will be
// traced.
func (c *Consumer) Events() chan kafka.Event {
return c.events
}
-// Poll polls the consumer for messages or events. Message will be
+// Poll polls the consumer for messages or events. Message will be
// traced.
func (c *Consumer) Poll(timeoutMS int) (event kafka.Event) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
evt := c.Consumer.Poll(timeoutMS)
if msg, ok := evt.(*kafka.Message); ok {
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
} else if offset, ok := evt.(kafka.OffsetsCommitted); ok {
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, offset.Offsets, offset.Error)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, offset.Offsets)
+ tOffsets := wrapTopicPartitions(offset.Offsets)
+ c.tracer.TrackCommitOffsets(tOffsets, offset.Error)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
}
return evt
}
-func (c *Consumer) trackHighWatermark(dataStreamsEnabled bool, offsets []kafka.TopicPartition) {
- if !dataStreamsEnabled {
- return
- }
- for _, tp := range offsets {
- if _, high, err := c.Consumer.GetWatermarkOffsets(*tp.Topic, tp.Partition); err == nil {
- tracer.TrackKafkaHighWatermarkOffset("", *tp.Topic, tp.Partition, high)
- }
- }
-}
-
-// ReadMessage polls the consumer for a message. Message will be traced.
+// ReadMessage polls the consumer for a message. Message will be traced.
func (c *Consumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
- if c.prev != nil {
- c.prev.Finish()
- c.prev = nil
+ if c.tracer.PrevSpan != nil {
+ c.tracer.PrevSpan.Finish()
+ c.tracer.PrevSpan = nil
}
msg, err := c.Consumer.ReadMessage(timeout)
if err != nil {
return nil, err
}
- setConsumeCheckpoint(c.cfg.dataStreamsEnabled, c.cfg.groupID, msg)
- c.prev = c.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ c.tracer.SetConsumeCheckpoint(tMsg)
+ c.tracer.PrevSpan = c.tracer.StartConsumeSpan(tMsg)
return msg, nil
}
// Commit commits current offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) Commit() ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.Commit()
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitMessage commits a message and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitMessage(msg *kafka.Message) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitMessage(msg)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
// CommitOffsets commits provided offsets and tracks the commit offsets if data streams is enabled.
func (c *Consumer) CommitOffsets(offsets []kafka.TopicPartition) ([]kafka.TopicPartition, error) {
tps, err := c.Consumer.CommitOffsets(offsets)
- commitOffsets(c.cfg.dataStreamsEnabled, c.cfg.groupID, tps, err)
- c.trackHighWatermark(c.cfg.dataStreamsEnabled, tps)
+ tOffsets := wrapTopicPartitions(tps)
+ c.tracer.TrackCommitOffsets(tOffsets, err)
+ c.tracer.TrackHighWatermarkOffset(tOffsets, c.Consumer)
return tps, err
}
-func commitOffsets(dataStreamsEnabled bool, groupID string, tps []kafka.TopicPartition, err error) {
- if err != nil || groupID == "" || !dataStreamsEnabled {
- return
- }
- for _, tp := range tps {
- tracer.TrackKafkaCommitOffset(groupID, *tp.Topic, tp.Partition, int64(tp.Offset))
- }
-}
-
-func trackProduceOffsets(dataStreamsEnabled bool, msg *kafka.Message, err error) {
- if err != nil || !dataStreamsEnabled || msg.TopicPartition.Topic == nil {
- return
- }
- tracer.TrackKafkaProduceOffset(*msg.TopicPartition.Topic, msg.TopicPartition.Partition, int64(msg.TopicPartition.Offset))
-}
-
// A Producer wraps a kafka.Producer.
type Producer struct {
*kafka.Producer
- cfg *config
+ tracer *tracing.KafkaTracer
produceChannel chan *kafka.Message
events chan kafka.Event
- libraryVersion int
}
// WrapProducer wraps a kafka.Producer so requests are traced.
func WrapProducer(p *kafka.Producer, opts ...Option) *Producer {
- version, _ := kafka.LibraryVersion()
wrapped := &Producer{
- Producer: p,
- cfg: newConfig(opts...),
- events: p.Events(),
- libraryVersion: version,
+ Producer: p,
+ tracer: newKafkaTracer(opts...),
+ events: p.Events(),
}
- log.Debug("%s: Wrapping Producer: %#v", packageName, wrapped.cfg)
- wrapped.produceChannel = wrapped.traceProduceChannel(p.ProduceChannel())
- if wrapped.cfg.dataStreamsEnabled {
- wrapped.events = wrapped.traceEventsChannel(p.Events())
+ log.Debug("%s: Wrapping Producer: %#v", logPrefix, wrapped.tracer)
+ wrapped.produceChannel = tracing.WrapProduceChannel(wrapped.tracer, p.ProduceChannel(), wrapMessage)
+ if wrapped.tracer.DSMEnabled() {
+ wrapped.events = tracing.WrapProduceEventsChannel(wrapped.tracer, p.Events(), wrapEvent)
}
return wrapped
}
-// Events returns the kafka Events channel (if enabled). Message events will be monitored
+// Events returns the kafka Events channel (if enabled). Message events will be monitored
// with data streams monitoring (if enabled)
func (p *Producer) Events() chan kafka.Event {
return p.events
}
-func (p *Producer) traceProduceChannel(out chan *kafka.Message) chan *kafka.Message {
- if out == nil {
- return out
- }
- in := make(chan *kafka.Message, 1)
- go func() {
- for msg := range in {
- span := p.startSpan(msg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
- out <- msg
- span.Finish()
- }
- }()
- return in
-}
-
-func (p *Producer) startSpan(msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(p.cfg.producerServiceName),
- tracer.ResourceName("Produce Topic " + *msg.TopicPartition.Topic),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.MessagingKafkaPartition, msg.TopicPartition.Partition),
- }
- if p.cfg.bootstrapServers != "" {
- opts = append(opts, tracer.Tag(ext.KafkaBootstrapServers, p.cfg.bootstrapServers))
- }
- if !math.IsNaN(p.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, p.cfg.analyticsRate))
- }
- // if there's a span context in the headers, use that as the parent
- carrier := NewMessageCarrier(msg)
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(p.cfg.ctx, p.cfg.producerSpanName, opts...)
- // inject the span context so consumers can pick it up
- tracer.Inject(span.Context(), carrier)
- return span
-}
-
// Close calls the underlying Producer.Close and also closes the internal
// wrapping producer channel.
func (p *Producer) Close() {
@@ -332,33 +191,23 @@ func (p *Producer) Close() {
// Produce calls the underlying Producer.Produce and traces the request.
func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) error {
- span := p.startSpan(msg)
+ tMsg := wrapMessage(msg)
+ span := p.tracer.StartProduceSpan(tMsg)
- // if the user has selected a delivery channel, we will wrap it and
- // wait for the delivery event to finish the span
- if deliveryChan != nil {
- oldDeliveryChan := deliveryChan
- deliveryChan = make(chan kafka.Event)
- go func() {
- var err error
- evt := <-deliveryChan
- if msg, ok := evt.(*kafka.Message); ok {
- // delivery errors are returned via TopicPartition.Error
- err = msg.TopicPartition.Error
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, err)
- }
- span.Finish(tracer.WithError(err))
- oldDeliveryChan <- evt
- }()
- }
+ var errChan chan error
+ deliveryChan, errChan = tracing.WrapDeliveryChannel(p.tracer, deliveryChan, span, wrapEvent)
+
+ p.tracer.SetProduceCheckpoint(tMsg)
- setProduceCheckpoint(p.cfg.dataStreamsEnabled, p.libraryVersion, msg)
err := p.Producer.Produce(msg, deliveryChan)
- // with no delivery channel or enqueue error, finish immediately
- if err != nil || deliveryChan == nil {
- span.Finish(tracer.WithError(err))
+ if err != nil {
+ if errChan != nil {
+ errChan <- err
+ } else {
+ // with no delivery channel or enqueue error, finish immediately
+ span.Finish(tracer.WithError(err))
+ }
}
-
return err
}
@@ -367,57 +216,3 @@ func (p *Producer) Produce(msg *kafka.Message, deliveryChan chan kafka.Event) er
func (p *Producer) ProduceChannel() chan *kafka.Message {
return p.produceChannel
}
-
-func (p *Producer) traceEventsChannel(in chan kafka.Event) chan kafka.Event {
- if in == nil {
- return nil
- }
- out := make(chan kafka.Event, 1)
- go func() {
- defer close(out)
- for evt := range in {
- if msg, ok := evt.(*kafka.Message); ok {
- trackProduceOffsets(p.cfg.dataStreamsEnabled, msg, msg.TopicPartition.Error)
- }
- out <- evt
- }
- }()
- return out
-}
-
-func setConsumeCheckpoint(dataStreamsEnabled bool, groupID string, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func setProduceCheckpoint(dataStreamsEnabled bool, version int, msg *kafka.Message) {
- if !dataStreamsEnabled || msg == nil {
- return
- }
- edges := []string{"direction:out", "topic:" + *msg.TopicPartition.Topic, "type:kafka"}
- carrier := NewMessageCarrier(msg)
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(datastreams.ExtractFromBase64Carrier(context.Background(), carrier), options.CheckpointParams{PayloadSize: getMsgSize(msg)}, edges...)
- if !ok || version < 0x000b0400 {
- // headers not supported before librdkafka >=0.11.4
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go b/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
index 4707a1e5ae..6f4b70dd78 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/kafka_test.go
@@ -23,6 +23,7 @@ import (
"github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
)
var (
@@ -30,82 +31,6 @@ var (
testTopic = "gotest"
)
-type consumerActionFn func(c *Consumer) (*kafka.Message, error)
-
-func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]mocktracer.Span, *kafka.Message) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("to enable integration test, set the INTEGRATION environment variable")
- }
- mt := mocktracer.Start()
- defer mt.Stop()
-
- // first write a message to the topic
- p, err := NewProducer(&kafka.ConfigMap{
- "bootstrap.servers": "127.0.0.1:9092",
- "go.delivery.reports": true,
- }, producerOpts...)
- require.NoError(t, err)
-
- delivery := make(chan kafka.Event, 1)
- err = p.Produce(&kafka.Message{
- TopicPartition: kafka.TopicPartition{
- Topic: &testTopic,
- Partition: 0,
- },
- Key: []byte("key2"),
- Value: []byte("value2"),
- }, delivery)
- require.NoError(t, err)
-
- msg1, _ := (<-delivery).(*kafka.Message)
- p.Close()
-
- // next attempt to consume the message
- c, err := NewConsumer(&kafka.ConfigMap{
- "group.id": testGroupID,
- "bootstrap.servers": "127.0.0.1:9092",
- "fetch.wait.max.ms": 500,
- "socket.timeout.ms": 1500,
- "session.timeout.ms": 1500,
- "enable.auto.offset.store": false,
- }, consumerOpts...)
- require.NoError(t, err)
-
- err = c.Assign([]kafka.TopicPartition{
- {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
- })
- require.NoError(t, err)
-
- msg2, err := consumerAction(c)
- require.NoError(t, err)
- _, err = c.CommitMessage(msg2)
- require.NoError(t, err)
- assert.Equal(t, msg1.String(), msg2.String())
- err = c.Close()
- require.NoError(t, err)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
- // they should be linked via headers
- assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
-
- if c.cfg.dataStreamsEnabled {
- backlogs := mt.SentDSMBacklogs()
- toMap := func(b []internaldsm.Backlog) map[string]struct{} {
- m := make(map[string]struct{})
- for _, b := range backlogs {
- m[strings.Join(b.Tags, "")] = struct{}{}
- }
- return m
- }
- backlogsMap := toMap(backlogs)
- require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
- require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
- }
- return spans, msg2
-}
-
func TestConsumerChannel(t *testing.T) {
// we can test consuming via the Events channel by artifically sending
// messages. Testing .Poll is done via an integration test.
@@ -164,8 +89,8 @@ func TestConsumerChannel(t *testing.T) {
assert.Equal(t, "queue", s.Tag(ext.SpanType))
assert.Equal(t, int32(1), s.Tag(ext.MessagingKafkaPartition))
assert.Equal(t, 0.3, s.Tag(ext.EventSampleRate))
- assert.Equal(t, kafka.Offset(i+1), s.Tag("offset"))
- assert.Equal(t, componentName, s.Tag(ext.Component))
+ assert.EqualValues(t, kafka.Offset(i+1), s.Tag("offset"))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka", s.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s.Tag(ext.MessagingSystem))
}
@@ -179,30 +104,6 @@ func TestConsumerChannel(t *testing.T) {
}
}
-/*
-to run the integration test locally:
-
- docker network create confluent
-
- docker run --rm \
- --name zookeeper \
- --network confluent \
- -p 2181:2181 \
- -e ZOOKEEPER_CLIENT_PORT=2181 \
- confluentinc/cp-zookeeper:5.0.0
-
- docker run --rm \
- --name kafka \
- --network confluent \
- -p 9092:9092 \
- -e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
- -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
- -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
- -e KAFKA_CREATE_TOPICS=gotest:1:1 \
- -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
- confluentinc/cp-kafka:5.0.0
-*/
-
func TestConsumerFunctional(t *testing.T) {
for _, tt := range []struct {
name string
@@ -236,7 +137,7 @@ func TestConsumerFunctional(t *testing.T) {
assert.Equal(t, 0.1, s0.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s0.Tag(ext.SpanType))
assert.Equal(t, int32(0), s0.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, componentName, s0.Tag(ext.Component))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka", s0.Tag(ext.Component))
assert.Equal(t, ext.SpanKindProducer, s0.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "127.0.0.1", s0.Tag(ext.KafkaBootstrapServers))
@@ -248,7 +149,7 @@ func TestConsumerFunctional(t *testing.T) {
assert.Equal(t, nil, s1.Tag(ext.EventSampleRate))
assert.Equal(t, "queue", s1.Tag(ext.SpanType))
assert.Equal(t, int32(0), s1.Tag(ext.MessagingKafkaPartition))
- assert.Equal(t, componentName, s1.Tag(ext.Component))
+ assert.Equal(t, "confluentinc/confluent-kafka-go/kafka", s1.Tag(ext.Component))
assert.Equal(t, ext.SpanKindConsumer, s1.Tag(ext.SpanKind))
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "127.0.0.1", s1.Tag(ext.KafkaBootstrapServers))
@@ -344,7 +245,7 @@ func TestCustomTags(t *testing.T) {
"socket.timeout.ms": 10,
"session.timeout.ms": 10,
"enable.auto.offset.store": false,
- }, WithCustomTag("foo", func(msg *kafka.Message) interface{} {
+ }, WithCustomTag("foo", func(_ *kafka.Message) interface{} {
return "bar"
}), WithCustomTag("key", func(msg *kafka.Message) interface{} {
return msg.Key
@@ -394,3 +295,124 @@ func TestNamingSchema(t *testing.T) {
}
namingschematest.NewKafkaTest(genSpans)(t)
}
+
+// Test we don't leak goroutines and properly close the span when Produce returns an error
+func TestProduceError(t *testing.T) {
+ defer func() {
+ err := goleak.Find()
+ if err != nil {
+ // if a goroutine is leaking, ensure it is not coming from this package
+ if strings.Contains(err.Error(), "contrib/confluentinc/confluent-kafka-go") {
+ assert.NoError(t, err, "found leaked goroutine(s) from this package")
+ }
+ }
+ }()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ })
+ require.NoError(t, err)
+ defer p.Close()
+
+ topic := ""
+ msg := &kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &topic,
+ },
+ }
+ deliveryChan := make(chan kafka.Event, 1)
+ err = p.Produce(msg, deliveryChan)
+ require.Error(t, err)
+ require.EqualError(t, err, "Local: Invalid argument or configuration")
+
+ select {
+ case <-deliveryChan:
+ assert.Fail(t, "there should be no events in the deliveryChan")
+ case <-time.After(1 * time.Second):
+ // assume there is no event
+ }
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+}
+
+type consumerActionFn func(c *Consumer) (*kafka.Message, error)
+
+func produceThenConsume(t *testing.T, consumerAction consumerActionFn, producerOpts []Option, consumerOpts []Option) ([]mocktracer.Span, *kafka.Message) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // first write a message to the topic
+ p, err := NewProducer(&kafka.ConfigMap{
+ "bootstrap.servers": "127.0.0.1:9092",
+ "go.delivery.reports": true,
+ }, producerOpts...)
+ require.NoError(t, err)
+
+ delivery := make(chan kafka.Event, 1)
+ err = p.Produce(&kafka.Message{
+ TopicPartition: kafka.TopicPartition{
+ Topic: &testTopic,
+ Partition: 0,
+ },
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ }, delivery)
+ require.NoError(t, err)
+
+ msg1, _ := (<-delivery).(*kafka.Message)
+ p.Close()
+
+ // next attempt to consume the message
+ c, err := NewConsumer(&kafka.ConfigMap{
+ "group.id": testGroupID,
+ "bootstrap.servers": "127.0.0.1:9092",
+ "fetch.wait.max.ms": 500,
+ "socket.timeout.ms": 1500,
+ "session.timeout.ms": 1500,
+ "enable.auto.offset.store": false,
+ }, consumerOpts...)
+ require.NoError(t, err)
+
+ err = c.Assign([]kafka.TopicPartition{
+ {Topic: &testTopic, Partition: 0, Offset: msg1.TopicPartition.Offset},
+ })
+ require.NoError(t, err)
+
+ msg2, err := consumerAction(c)
+ require.NoError(t, err)
+ _, err = c.CommitMessage(msg2)
+ require.NoError(t, err)
+ assert.Equal(t, msg1.String(), msg2.String())
+ err = c.Close()
+ require.NoError(t, err)
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+ // they should be linked via headers
+ assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+
+ if c.tracer.DSMEnabled() {
+ backlogs := mt.SentDSMBacklogs()
+ toMap := func(_ []internaldsm.Backlog) map[string]struct{} {
+ m := make(map[string]struct{})
+ for _, b := range backlogs {
+ m[strings.Join(b.Tags, "")] = struct{}{}
+ }
+ return m
+ }
+ backlogsMap := toMap(backlogs)
+ require.Contains(t, backlogsMap, "consumer_group:"+testGroupID+"partition:0"+"topic:"+testTopic+"type:kafka_commit")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_high_watermark")
+ require.Contains(t, backlogsMap, "partition:0"+"topic:"+testTopic+"type:kafka_produce")
+ }
+ return spans, msg2
+}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/option.go b/contrib/confluentinc/confluent-kafka-go/kafka/option.go
index 7898d4607c..707bdd1214 100644
--- a/contrib/confluentinc/confluent-kafka-go/kafka/option.go
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/option.go
@@ -6,129 +6,45 @@
package kafka
import (
- "context"
- "math"
- "net"
- "strings"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
-
"github.com/confluentinc/confluent-kafka-go/kafka"
-)
-
-const defaultServiceName = "kafka"
-type config struct {
- ctx context.Context
- consumerServiceName string
- producerServiceName string
- consumerSpanName string
- producerSpanName string
- analyticsRate float64
- bootstrapServers string
- groupID string
- tagFns map[string]func(msg *kafka.Message) interface{}
- dataStreamsEnabled bool
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
// An Option customizes the config.
-type Option func(cfg *config)
-
-func newConfig(opts ...Option) *config {
- cfg := &config{
- ctx: context.Background(),
- // analyticsRate: globalconfig.AnalyticsRate(),
- analyticsRate: math.NaN(),
- }
- cfg.dataStreamsEnabled = internal.BoolEnv("DD_DATA_STREAMS_ENABLED", false)
- if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
- cfg.analyticsRate = 1.0
- }
-
- cfg.consumerServiceName = namingschema.ServiceName(defaultServiceName)
- cfg.producerServiceName = namingschema.ServiceNameOverrideV0(defaultServiceName, defaultServiceName)
- cfg.consumerSpanName = namingschema.OpName(namingschema.KafkaInbound)
- cfg.producerSpanName = namingschema.OpName(namingschema.KafkaOutbound)
-
- for _, opt := range opts {
- opt(cfg)
- }
- return cfg
-}
+type Option = tracing.Option
// WithContext sets the config context to ctx.
// Deprecated: This is deprecated in favor of passing the context
// via the message headers
-func WithContext(ctx context.Context) Option {
- return func(cfg *config) {
- cfg.ctx = ctx
- }
-}
+var WithContext = tracing.WithContext
// WithServiceName sets the config service name to serviceName.
-func WithServiceName(serviceName string) Option {
- return func(cfg *config) {
- cfg.consumerServiceName = serviceName
- cfg.producerServiceName = serviceName
- }
-}
+var WithServiceName = tracing.WithServiceName
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) Option {
- return func(cfg *config) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) Option {
- return func(cfg *config) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithCustomTag will cause the given tagFn to be evaluated after executing
// a query and attach the result to the span tagged by the key.
func WithCustomTag(tag string, tagFn func(msg *kafka.Message) interface{}) Option {
- return func(cfg *config) {
- if cfg.tagFns == nil {
- cfg.tagFns = make(map[string]func(msg *kafka.Message) interface{})
+ wrapped := func(msg tracing.Message) interface{} {
+ if m, ok := msg.Unwrap().(*kafka.Message); ok {
+ return tagFn(m)
}
- cfg.tagFns[tag] = tagFn
+ return nil
}
+ return tracing.WithCustomTag(tag, wrapped)
}
// WithConfig extracts the config information for the client to be tagged
-func WithConfig(cg *kafka.ConfigMap) Option {
- return func(cfg *config) {
- if groupID, err := cg.Get("group.id", ""); err == nil {
- cfg.groupID = groupID.(string)
- }
- if bs, err := cg.Get("bootstrap.servers", ""); err == nil && bs != "" {
- for _, addr := range strings.Split(bs.(string), ",") {
- host, _, err := net.SplitHostPort(addr)
- if err == nil {
- cfg.bootstrapServers = host
- return
- }
- }
- }
- }
+func WithConfig(cm *kafka.ConfigMap) Option {
+ return tracing.WithConfig(wrapConfigMap(cm))
}
// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
-func WithDataStreams() Option {
- return func(cfg *config) {
- cfg.dataStreamsEnabled = true
- }
-}
+var WithDataStreams = tracing.WithDataStreams
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/option_test.go b/contrib/confluentinc/confluent-kafka-go/kafka/option_test.go
deleted file mode 100644
index d990870fc5..0000000000
--- a/contrib/confluentinc/confluent-kafka-go/kafka/option_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package kafka
-
-import (
- "math"
- "testing"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestDataStreamsActivation(t *testing.T) {
- t.Run("default", func(t *testing.T) {
- cfg := newConfig()
- assert.False(t, cfg.dataStreamsEnabled)
- })
- t.Run("withOption", func(t *testing.T) {
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
- })
- t.Run("withEnv", func(t *testing.T) {
- t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
- cfg := newConfig()
- assert.True(t, cfg.dataStreamsEnabled)
- })
- t.Run("optionOverridesEnv", func(t *testing.T) {
- t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
- cfg := newConfig(WithDataStreams())
- assert.True(t, cfg.dataStreamsEnabled)
- })
-}
-
-func TestAnalyticsSettings(t *testing.T) {
- t.Run("defaults", func(t *testing.T) {
- cfg := newConfig()
- assert.True(t, math.IsNaN(cfg.analyticsRate))
- })
-
- t.Run("global", func(t *testing.T) {
- t.Skip("global flag disabled")
- rate := globalconfig.AnalyticsRate()
- defer globalconfig.SetAnalyticsRate(rate)
- globalconfig.SetAnalyticsRate(0.4)
-
- cfg := newConfig()
- assert.Equal(t, 0.4, cfg.analyticsRate)
- })
-
- t.Run("enabled", func(t *testing.T) {
- cfg := newConfig(WithAnalytics(true))
- assert.Equal(t, 1.0, cfg.analyticsRate)
- })
-
- t.Run("override", func(t *testing.T) {
- rate := globalconfig.AnalyticsRate()
- defer globalconfig.SetAnalyticsRate(rate)
- globalconfig.SetAnalyticsRate(0.4)
-
- cfg := newConfig(WithAnalyticsRate(0.2))
- assert.Equal(t, 0.2, cfg.analyticsRate)
- })
-}
diff --git a/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go b/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go
new file mode 100644
index 0000000000..9e4c379ff9
--- /dev/null
+++ b/contrib/confluentinc/confluent-kafka-go/kafka/tracing.go
@@ -0,0 +1,163 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/confluentinc/confluent-kafka-go/internal/tracing"
+)
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) Unwrap() any {
+ return w.Message
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Message.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Message.Key
+}
+
+func (w *wMessage) GetHeaders() []tracing.Header {
+ hs := make([]tracing.Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []tracing.Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopicPartition() tracing.TopicPartition {
+ return wrapTopicPartition(w.Message.TopicPartition)
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) tracing.Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Header.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Header.Value
+}
+
+type wTopicPartition struct {
+ kafka.TopicPartition
+}
+
+func wrapTopicPartition(tp kafka.TopicPartition) tracing.TopicPartition {
+ return wTopicPartition{tp}
+}
+
+func wrapTopicPartitions(tps []kafka.TopicPartition) []tracing.TopicPartition {
+ wtps := make([]tracing.TopicPartition, 0, len(tps))
+ for _, tp := range tps {
+ wtps = append(wtps, wTopicPartition{tp})
+ }
+ return wtps
+}
+
+func (w wTopicPartition) GetTopic() string {
+ if w.Topic == nil {
+ return ""
+ }
+ return *w.Topic
+}
+
+func (w wTopicPartition) GetPartition() int32 {
+ return w.Partition
+}
+
+func (w wTopicPartition) GetOffset() int64 {
+ return int64(w.Offset)
+}
+
+func (w wTopicPartition) GetError() error {
+ return w.Error
+}
+
+type wEvent struct {
+ kafka.Event
+}
+
+func wrapEvent(event kafka.Event) tracing.Event {
+ return wEvent{event}
+}
+
+func (w wEvent) KafkaMessage() (tracing.Message, bool) {
+ if m, ok := w.Event.(*kafka.Message); ok {
+ return wrapMessage(m), true
+ }
+ return nil, false
+}
+
+func (w wEvent) KafkaOffsetsCommitted() (tracing.OffsetsCommitted, bool) {
+ if oc, ok := w.Event.(kafka.OffsetsCommitted); ok {
+ return wrapOffsetsCommitted(oc), true
+ }
+ return nil, false
+}
+
+type wOffsetsCommitted struct {
+ kafka.OffsetsCommitted
+}
+
+func wrapOffsetsCommitted(oc kafka.OffsetsCommitted) tracing.OffsetsCommitted {
+ return wOffsetsCommitted{oc}
+}
+
+func (w wOffsetsCommitted) GetError() error {
+ return w.Error
+}
+
+func (w wOffsetsCommitted) GetOffsets() []tracing.TopicPartition {
+ ttps := make([]tracing.TopicPartition, 0, len(w.Offsets))
+ for _, tp := range w.Offsets {
+ ttps = append(ttps, wrapTopicPartition(tp))
+ }
+ return ttps
+}
+
+type wConfigMap struct {
+ cfg *kafka.ConfigMap
+}
+
+func wrapConfigMap(cm *kafka.ConfigMap) tracing.ConfigMap {
+ return &wConfigMap{cm}
+}
+
+func (w *wConfigMap) Get(key string, defVal any) (any, error) {
+ return w.cfg.Get(key, defVal)
+}
diff --git a/contrib/dimfeld/httptreemux.v5/httptreemux.go b/contrib/dimfeld/httptreemux.v5/httptreemux.go
index d9ae06e91d..e50c951cb9 100644
--- a/contrib/dimfeld/httptreemux.v5/httptreemux.go
+++ b/contrib/dimfeld/httptreemux.v5/httptreemux.go
@@ -145,7 +145,10 @@ func getRoute(router *httptreemux.TreeMux, w http.ResponseWriter, req *http.Requ
// replace parameter at end of the path, i.e. "../:param"
oldP = "/" + v
newP = "/:" + k
- route = strings.Replace(route, oldP, newP, 1)
+ if strings.HasSuffix(route, oldP) {
+ endPos := strings.LastIndex(route, oldP)
+ route = route[:endPos] + newP
+ }
}
return route, true
}
diff --git a/contrib/dimfeld/httptreemux.v5/httptreemux_test.go b/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
index 6bf900217c..3af094528d 100644
--- a/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
+++ b/contrib/dimfeld/httptreemux.v5/httptreemux_test.go
@@ -670,6 +670,44 @@ func TestTrailingSlashRoutesWithBehaviorUseHandler(t *testing.T) {
})
}
+func TestDuplicateWordsParamsHandler(t *testing.T) {
+ tests := []struct {
+ name string
+ route string
+ url string
+ }{
+ {
+ name: "Test minimal case",
+ route: "/1a/:n",
+ url: "/1a/1",
+ },
+ {
+ name: "Test string with separators",
+ route: "/foo/2by4/bar/:n",
+ url: "/foo/2by4/bar/2",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ router := New()
+ router.GET(tt.route, handler200)
+
+ r := httptest.NewRequest("GET", tt.url, nil)
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+ assert.Equal("GET "+tt.route, spans[0].Tag(ext.ResourceName))
+ })
+ }
+}
+
func TestIsSupportedRedirectStatus(t *testing.T) {
tests := []struct {
name string
diff --git a/contrib/google.golang.org/grpc/appsec.go b/contrib/google.golang.org/grpc/appsec.go
index 6330cd8f4f..f7b4aecf53 100644
--- a/contrib/google.golang.org/grpc/appsec.go
+++ b/contrib/google.golang.org/grpc/appsec.go
@@ -7,68 +7,73 @@ package grpc
import (
"context"
+ "sync/atomic"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/grpctrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
-
- "github.com/DataDog/appsec-internal-go/netip"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
)
+func applyAction(blockAtomic *atomic.Pointer[actions.BlockGRPC], err *error) bool {
+ if blockAtomic == nil {
+ return false
+ }
+
+ block := blockAtomic.Load()
+ if block == nil {
+ return false
+ }
+
+ code, e := block.GRPCWrapper()
+ *err = status.Error(codes.Code(code), e.Error())
+ return true
+}
+
// UnaryHandler wrapper to use when AppSec is enabled to monitor its execution.
func appsecUnaryHandlerMiddleware(method string, span ddtrace.Span, handler grpc.UnaryHandler) grpc.UnaryHandler {
- trace.SetAppSecEnabledTags(span)
return func(ctx context.Context, req any) (res any, rpcErr error) {
- var blockedErr error
md, _ := metadata.FromIncomingContext(ctx)
- clientIP := setClientIP(ctx, span, md)
- args := types.HandlerOperationArgs{
- Method: method,
- Metadata: md,
- ClientIP: clientIP,
+ var remoteAddr string
+ if p, ok := peer.FromContext(ctx); ok {
+ remoteAddr = p.Addr.String()
}
- ctx, op := grpcsec.StartHandlerOperation(ctx, args, nil, func(op *types.HandlerOperation) {
- dyngo.OnData(op, func(a *sharedsec.GRPCAction) {
- code, err := a.GRPCWrapper()
- blockedErr = status.Error(codes.Code(code), err.Error())
- })
+
+ ctx, op, blockAtomic := grpcsec.StartHandlerOperation(ctx, grpcsec.HandlerOperationArgs{
+ Method: method,
+ Metadata: md,
+ RemoteAddr: remoteAddr,
})
+
defer func() {
- events := op.Finish(types.HandlerOperationRes{})
- if len(events) > 0 {
- grpctrace.SetSecurityEventsTags(span, events)
- }
- if blockedErr != nil {
- op.SetTag(trace.BlockedRequestTag, true)
- rpcErr = blockedErr
+ var statusCode int
+ if statusErr, ok := rpcErr.(interface{ GRPCStatus() *status.Status }); ok && !applyAction(blockAtomic, &rpcErr) {
+ statusCode = int(statusErr.GRPCStatus().Code())
}
- grpctrace.SetRequestMetadataTags(span, md)
- trace.SetTags(span, op.Tags())
+ op.Finish(span, grpcsec.HandlerOperationRes{StatusCode: statusCode})
+ applyAction(blockAtomic, &rpcErr)
}()
// Check if a blocking condition was detected so far with the start operation event (ip blocking, metadata blocking, etc.)
- if blockedErr != nil {
- return nil, blockedErr
+ if applyAction(blockAtomic, &rpcErr) {
+ return
}
// As of our gRPC abstract operation definition, we must fake a receive operation for unary RPCs (the same model fits both unary and streaming RPCs)
- grpcsec.StartReceiveOperation(types.ReceiveOperationArgs{}, op).Finish(types.ReceiveOperationRes{Message: req})
- // Check if a blocking condition was detected so far with the receive operation events
- if blockedErr != nil {
- return nil, blockedErr
+ if _ = grpcsec.MonitorRequestMessage(ctx, req); applyAction(blockAtomic, &rpcErr) {
+ return
}
+ defer func() {
+ _ = grpcsec.MonitorResponseMessage(ctx, res)
+ applyAction(blockAtomic, &rpcErr)
+ }()
+
// Call the original handler - let the deferred function above handle the blocking condition and return error
return handler(ctx, req)
}
@@ -76,98 +81,75 @@ func appsecUnaryHandlerMiddleware(method string, span ddtrace.Span, handler grpc
// StreamHandler wrapper to use when AppSec is enabled to monitor its execution.
func appsecStreamHandlerMiddleware(method string, span ddtrace.Span, handler grpc.StreamHandler) grpc.StreamHandler {
- trace.SetAppSecEnabledTags(span)
return func(srv any, stream grpc.ServerStream) (rpcErr error) {
- // Create a ServerStream wrapper with appsec RPC handler operation and the Go context (to implement the ServerStream interface)
- appsecStream := &appsecServerStream{
- ServerStream: stream,
- // note: the blockedErr field is captured by the RPC handler's OnData closure below
- }
-
ctx := stream.Context()
md, _ := metadata.FromIncomingContext(ctx)
- clientIP := setClientIP(ctx, span, md)
- grpctrace.SetRequestMetadataTags(span, md)
+ var remoteAddr string
+ if p, ok := peer.FromContext(ctx); ok {
+ remoteAddr = p.Addr.String()
+ }
// Create the handler operation and listen to blocking gRPC actions to detect a blocking condition
- args := types.HandlerOperationArgs{
- Method: method,
- Metadata: md,
- ClientIP: clientIP,
- }
- ctx, op := grpcsec.StartHandlerOperation(ctx, args, nil, func(op *types.HandlerOperation) {
- dyngo.OnData(op, func(a *sharedsec.GRPCAction) {
- code, e := a.GRPCWrapper()
- appsecStream.blockedErr = status.Error(codes.Code(code), e.Error())
- })
+ ctx, op, blockAtomic := grpcsec.StartHandlerOperation(ctx, grpcsec.HandlerOperationArgs{
+ Method: method,
+ Metadata: md,
+ RemoteAddr: remoteAddr,
})
- // Finish constructing the appsec stream wrapper and replace the original one
- appsecStream.handlerOperation = op
- appsecStream.ctx = ctx
+ // Create a ServerStream wrapper with appsec RPC handler operation and the Go context (to implement the ServerStream interface)
defer func() {
- events := op.Finish(types.HandlerOperationRes{})
-
- if len(events) > 0 {
- grpctrace.SetSecurityEventsTags(span, events)
- }
-
- if appsecStream.blockedErr != nil {
- op.SetTag(trace.BlockedRequestTag, true)
- // Change the RPC return error with appsec's
- rpcErr = appsecStream.blockedErr
+ var statusCode int
+ if res, ok := rpcErr.(interface{ Status() codes.Code }); ok && !applyAction(blockAtomic, &rpcErr) {
+ statusCode = int(res.Status())
}
- trace.SetTags(span, op.Tags())
+ op.Finish(span, grpcsec.HandlerOperationRes{StatusCode: statusCode})
+ applyAction(blockAtomic, &rpcErr)
}()
// Check if a blocking condition was detected so far with the start operation event (ip blocking, metadata blocking, etc.)
- if appsecStream.blockedErr != nil {
- return appsecStream.blockedErr
+ if applyAction(blockAtomic, &rpcErr) {
+ return
}
// Call the original handler - let the deferred function above handle the blocking condition and return error
- return handler(srv, appsecStream)
+ return handler(srv, &appsecServerStream{
+ ServerStream: stream,
+ handlerOperation: op,
+ ctx: ctx,
+ action: blockAtomic,
+ rpcErr: &rpcErr,
+ })
}
}
type appsecServerStream struct {
grpc.ServerStream
- handlerOperation *types.HandlerOperation
+ handlerOperation *grpcsec.HandlerOperation
ctx context.Context
-
- // blockedErr is used to store the error to return when a blocking sec event is detected.
- blockedErr error
+ action *atomic.Pointer[actions.BlockGRPC]
+ rpcErr *error
}
// RecvMsg implements grpc.ServerStream interface method to monitor its
// execution with AppSec.
-func (ss *appsecServerStream) RecvMsg(m interface{}) (err error) {
- op := grpcsec.StartReceiveOperation(types.ReceiveOperationArgs{}, ss.handlerOperation)
+func (ss *appsecServerStream) RecvMsg(msg any) (err error) {
defer func() {
- op.Finish(types.ReceiveOperationRes{Message: m})
- if ss.blockedErr != nil {
- // Change the function call return error with appsec's
- err = ss.blockedErr
+ if _ = grpcsec.MonitorRequestMessage(ss.ctx, msg); applyAction(ss.action, ss.rpcErr) {
+ err = *ss.rpcErr
}
}()
- return ss.ServerStream.RecvMsg(m)
+ return ss.ServerStream.RecvMsg(msg)
}
-func (ss *appsecServerStream) Context() context.Context {
- return ss.ctx
+func (ss *appsecServerStream) SendMsg(msg any) error {
+ if _ = grpcsec.MonitorResponseMessage(ss.ctx, msg); applyAction(ss.action, ss.rpcErr) {
+ return *ss.rpcErr
+ }
+ return ss.ServerStream.SendMsg(msg)
}
-func setClientIP(ctx context.Context, span ddtrace.Span, md metadata.MD) netip.Addr {
- var remoteAddr string
- if p, ok := peer.FromContext(ctx); ok {
- remoteAddr = p.Addr.String()
- }
- ipTags, clientIP := httptrace.ClientIPTags(md, false, remoteAddr)
- log.Debug("appsec: http client ip detection returned `%s` given the http headers `%v`", clientIP, md)
- if len(ipTags) > 0 {
- trace.SetTags(span, ipTags)
- }
- return clientIP
+func (ss *appsecServerStream) Context() context.Context {
+ return ss.ctx
}
diff --git a/contrib/google.golang.org/grpc/appsec_test.go b/contrib/google.golang.org/grpc/appsec_test.go
index 911efa7c5d..9cce816b26 100644
--- a/contrib/google.golang.org/grpc/appsec_test.go
+++ b/contrib/google.golang.org/grpc/appsec_test.go
@@ -9,6 +9,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "io"
"net"
"testing"
@@ -16,6 +17,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -24,26 +26,27 @@ import (
)
func TestAppSec(t *testing.T) {
+ t.Setenv("DD_APPSEC_WAF_TIMEOUT", "1h") // Functionally unlimited
appsec.Start()
defer appsec.Stop()
if !appsec.Enabled() {
t.Skip("appsec disabled")
}
- setup := func() (FixtureClient, mocktracer.Tracer, func()) {
- rig, err := newAppsecRig(false)
+ setup := func(t *testing.T) (FixtureClient, mocktracer.Tracer, func()) {
+ rig, err := newAppsecRig(t, false)
require.NoError(t, err)
mt := mocktracer.Start()
return rig.client, mt, func() {
- rig.Close()
+ assert.NoError(t, rig.Close())
mt.Stop()
}
}
t.Run("unary", func(t *testing.T) {
- client, mt, cleanup := setup()
+ client, mt, cleanup := setup(t)
defer cleanup()
// Send a XSS attack in the payload along with the canary value in the RPC metadata
@@ -64,7 +67,7 @@ func TestAppSec(t *testing.T) {
})
t.Run("stream", func(t *testing.T) {
- client, mt, cleanup := setup()
+ client, mt, cleanup := setup(t)
defer cleanup()
// Send a XSS attack in the payload along with the canary value in the RPC metadata
@@ -73,8 +76,9 @@ func TestAppSec(t *testing.T) {
require.NoError(t, err)
// Send a XSS attack
- err = stream.Send(&FixtureRequest{Name: ""})
- require.NoError(t, err)
+ if err := stream.Send(&FixtureRequest{Name: ""}); err != io.EOF {
+ require.NoError(t, err)
+ }
// Check that the handler was properly called
res, err := stream.Recv()
@@ -83,8 +87,9 @@ func TestAppSec(t *testing.T) {
for i := 0; i < 5; i++ { // Fire multiple times, each time should result in a detected event
// Send a SQLi attack
- err = stream.Send(&FixtureRequest{Name: fmt.Sprintf("-%[1]d' and %[1]d=%[1]d union select * from users--", i)})
- require.NoError(t, err)
+ if err := stream.Send(&FixtureRequest{Name: fmt.Sprintf("-%[1]d' and %[1]d=%[1]d union select * from users--", i)}); err != io.EOF {
+ require.NoError(t, err)
+ }
// Check that the handler was properly called
res, err = stream.Recv()
@@ -121,9 +126,9 @@ func TestAppSec(t *testing.T) {
histogram[tr.Rule.ID]++
}
- require.EqualValues(t, 1, histogram["crs-941-180"]) // XSS attack attempt
- require.EqualValues(t, 5, histogram["crs-942-270"]) // SQL-injection attack attempt
- require.EqualValues(t, 1, histogram["ua0-600-55x"]) // canary rule attack attempt
+ assert.EqualValues(t, 1, histogram["crs-941-180"]) // XSS attack attempt
+ assert.EqualValues(t, 5, histogram["crs-942-270"]) // SQL-injection attack attempt
+ assert.EqualValues(t, 1, histogram["ua0-600-55x"]) // canary rule attack attempt
require.Len(t, histogram, 3)
})
@@ -139,13 +144,13 @@ func TestBlocking(t *testing.T) {
}
setup := func() (FixtureClient, mocktracer.Tracer, func()) {
- rig, err := newAppsecRig(false)
+ rig, err := newAppsecRig(t, false)
require.NoError(t, err)
mt := mocktracer.Start()
return rig.client, mt, func() {
- rig.Close()
+ assert.NoError(t, rig.Close())
mt.Stop()
}
}
@@ -183,7 +188,7 @@ func TestBlocking(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
// Helper assertion function to run for the unary and stream tests
- assert := func(t *testing.T, do func(client FixtureClient)) {
+ withClient := func(t *testing.T, do func(client FixtureClient)) {
client, mt, cleanup := setup()
defer cleanup()
@@ -204,7 +209,7 @@ func TestBlocking(t *testing.T) {
}
t.Run("unary", func(t *testing.T) {
- assert(t, func(client FixtureClient) {
+ withClient(t, func(client FixtureClient) {
ctx := metadata.NewOutgoingContext(context.Background(), tc.md)
reply, err := client.Ping(ctx, &FixtureRequest{Name: tc.message})
require.Nil(t, reply)
@@ -213,19 +218,18 @@ func TestBlocking(t *testing.T) {
})
t.Run("stream", func(t *testing.T) {
- assert(t, func(client FixtureClient) {
+ withClient(t, func(client FixtureClient) {
ctx := metadata.NewOutgoingContext(context.Background(), tc.md)
// Open the stream
stream, err := client.StreamPing(ctx)
require.NoError(t, err)
- defer func() {
- require.NoError(t, stream.CloseSend())
- }()
+ defer func() { assert.NoError(t, stream.CloseSend()) }()
// Send a message
- err = stream.Send(&FixtureRequest{Name: tc.message})
- require.NoError(t, err)
+ if err := stream.Send(&FixtureRequest{Name: tc.message}); err != io.EOF {
+ require.NoError(t, err)
+ }
// Receive a message
reply, err := stream.Recv()
@@ -249,20 +253,20 @@ func TestPasslist(t *testing.T) {
t.Skip("appsec disabled")
}
- setup := func() (FixtureClient, mocktracer.Tracer, func()) {
- rig, err := newAppsecRig(false)
+ setup := func(t *testing.T) (FixtureClient, mocktracer.Tracer, func()) {
+ rig, err := newAppsecRig(t, false)
require.NoError(t, err)
mt := mocktracer.Start()
return rig.client, mt, func() {
- rig.Close()
+ assert.NoError(t, rig.Close())
mt.Stop()
}
}
t.Run("unary", func(t *testing.T) {
- client, mt, cleanup := setup()
+ client, mt, cleanup := setup(t)
defer cleanup()
// Send the payload triggering the sec event thanks to the "zouzou" value in the RPC metadata
@@ -284,7 +288,7 @@ func TestPasslist(t *testing.T) {
})
t.Run("stream", func(t *testing.T) {
- client, mt, cleanup := setup()
+ client, mt, cleanup := setup(t)
defer cleanup()
// Open the steam triggering the sec event thanks to the "zouzou" value in the RPC metadata
@@ -294,8 +298,9 @@ func TestPasslist(t *testing.T) {
// Send some messages
for i := 0; i < 5; i++ {
- err = stream.Send(&FixtureRequest{Name: "hello"})
- require.NoError(t, err)
+ if err := stream.Send(&FixtureRequest{Name: "hello"}); err != io.EOF {
+ require.NoError(t, err)
+ }
// Check that the handler was properly called
res, err := stream.Recv()
@@ -319,7 +324,7 @@ func TestPasslist(t *testing.T) {
})
}
-func newAppsecRig(traceClient bool, interceptorOpts ...Option) (*appsecRig, error) {
+func newAppsecRig(t *testing.T, traceClient bool, interceptorOpts ...Option) (*appsecRig, error) {
interceptorOpts = append([]InterceptorOption{WithServiceName("grpc")}, interceptorOpts...)
server := grpc.NewServer(
@@ -336,7 +341,7 @@ func newAppsecRig(traceClient bool, interceptorOpts ...Option) (*appsecRig, erro
}
_, port, _ := net.SplitHostPort(li.Addr().String())
// start our test fixtureServer.
- go server.Serve(li)
+ go func() { assert.NoError(t, server.Serve(li)) }()
opts := []grpc.DialOption{grpc.WithInsecure()}
if traceClient {
@@ -370,9 +375,9 @@ type appsecRig struct {
client FixtureClient
}
-func (r *appsecRig) Close() {
- r.server.Stop()
- r.conn.Close()
+func (r *appsecRig) Close() error {
+ defer r.server.GracefulStop()
+ return r.conn.Close()
}
type appsecFixtureServer struct {
diff --git a/contrib/google.golang.org/grpc/grpc_test.go b/contrib/google.golang.org/grpc/grpc_test.go
index 9d3313b8ee..2534630689 100644
--- a/contrib/google.golang.org/grpc/grpc_test.go
+++ b/contrib/google.golang.org/grpc/grpc_test.go
@@ -64,7 +64,7 @@ func TestUnary(t *testing.T) {
t.Run(name, func(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"), WithRequestTags())
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(rig.Close()) }()
client := rig.client
mt := mocktracer.Start()
@@ -226,7 +226,7 @@ func TestStreaming(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"))
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
tracer.ServiceName("b"),
@@ -251,7 +251,7 @@ func TestStreaming(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"), WithStreamMessages(false))
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
tracer.ServiceName("b"),
@@ -276,7 +276,7 @@ func TestStreaming(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"), WithStreamCalls(false))
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
tracer.ServiceName("b"),
@@ -318,7 +318,7 @@ func TestSpanTree(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"))
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(rig.Close()) }()
{
// Unary Ping rpc leading to trace:
@@ -353,7 +353,7 @@ func TestSpanTree(t *testing.T) {
rig, err := newRig(true, WithServiceName("grpc"), WithRequestTags(), WithMetadataTags())
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(rig.Close()) }()
client := rig.client
{
@@ -438,7 +438,7 @@ func TestPass(t *testing.T) {
rig, err := newRig(false, WithServiceName("grpc"))
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(rig.Close()) }()
client := rig.client
ctx := context.Background()
@@ -472,7 +472,7 @@ func TestPreservesMetadata(t *testing.T) {
if err != nil {
t.Fatalf("error setting up rig: %s", err)
}
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
ctx := context.Background()
ctx = metadata.AppendToOutgoingContext(ctx, "test-key", "test-value")
@@ -500,7 +500,7 @@ func TestStreamSendsErrorCode(t *testing.T) {
rig, err := newRig(true)
require.NoError(t, err, "error setting up rig")
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
ctx := context.Background()
@@ -529,7 +529,7 @@ func TestStreamSendsErrorCode(t *testing.T) {
containsErrorCode = true
}
}
- assert.True(t, containsErrorCode, "at least one span should contain error code")
+ assert.True(t, containsErrorCode, "at least one span should contain error code, the spans were:\n%v", spans)
// ensure that last span contains error code also
gotLastSpanCode := spans[len(spans)-1].Tag(tagCode)
@@ -603,9 +603,9 @@ type rig struct {
client FixtureClient
}
-func (r *rig) Close() {
- r.server.Stop()
- r.conn.Close()
+func (r *rig) Close() error {
+ defer r.server.GracefulStop()
+ return r.conn.Close()
}
func newRigWithInterceptors(
@@ -669,7 +669,7 @@ func TestAnalyticsSettings(t *testing.T) {
if err != nil {
t.Fatalf("error setting up rig: %s", err)
}
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
client := rig.client
resp, err := client.Ping(context.Background(), &FixtureRequest{Name: "pass"})
@@ -1145,7 +1145,7 @@ func getGenSpansFn(traceClient, traceServer bool) namingschematest.GenSpansFn {
}
rig, err := newRigWithInterceptors(serverInterceptors, clientInterceptors)
require.NoError(t, err)
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
_, err = rig.client.Ping(context.Background(), &FixtureRequest{Name: "pass"})
require.NoError(t, err)
@@ -1312,7 +1312,7 @@ func TestIssue2050(t *testing.T) {
}
rig, err := newRigWithInterceptors(serverInterceptors, clientInterceptors)
require.NoError(t, err)
- defer rig.Close()
+ defer func() { assert.NoError(t, rig.Close()) }()
// call tracer.Start after integration is initialized, to reproduce the issue
tracer.Start(tracer.WithHTTPClient(httpClient))
diff --git a/contrib/graph-gophers/graphql-go/appsec_test.go b/contrib/graph-gophers/graphql-go/appsec_test.go
index a035b6c78c..a0a025ba91 100644
--- a/contrib/graph-gophers/graphql-go/appsec_test.go
+++ b/contrib/graph-gophers/graphql-go/appsec_test.go
@@ -13,7 +13,7 @@ import (
"path"
"testing"
- graphql "github.com/graph-gophers/graphql-go"
+ "github.com/graph-gophers/graphql-go"
"github.com/stretchr/testify/require"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
@@ -49,7 +49,6 @@ func TestAppSec(t *testing.T) {
testCases := map[string]struct {
query string
variables map[string]any
- events map[string]string
}{
"basic": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`,
@@ -57,10 +56,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"with-default-parameter": {
query: fmt.Sprintf(`query TestQuery($topLevelId: String = %#v, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`, topLevelAttack),
@@ -68,10 +63,6 @@ func TestAppSec(t *testing.T) {
// "topLevelId" omitted (default value used)
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"embedded-variable": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) {
@@ -83,10 +74,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevelMapped)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
}
for name, tc := range testCases {
@@ -95,16 +82,19 @@ func TestAppSec(t *testing.T) {
defer mt.Stop()
resp := schema.Exec(context.Background(), tc.query, "TestQuery", tc.variables)
require.Empty(t, resp.Errors)
+
var data map[string]any
err := json.Unmarshal(resp.Data, &data)
require.NoError(t, err)
require.Equal(t, map[string]any{"topLevel": map[string]any{"nested": fmt.Sprintf("%s/%s", topLevelAttack, nestedAttack)}}, data)
+
// Ensure the query produced the expected appsec events
spans := mt.FinishedSpans()
require.NotEmpty(t, spans)
+
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
- require.Equal(t, 1, spans[len(spans)-1].Tag("_dd.appsec.enabled"))
- events := make(map[string]string)
+ span := spans[len(spans)-1]
+ require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
type ddAppsecJSON struct {
Triggers []struct {
Rule struct {
@@ -112,33 +102,19 @@ func TestAppSec(t *testing.T) {
} `json:"rule"`
} `json:"triggers"`
}
- // Search for AppSec events in the set of spans
- for _, span := range spans {
- jsonText, ok := span.Tag("_dd.appsec.json").(string)
- if !ok || jsonText == "" {
- continue
- }
- var parsed ddAppsecJSON
- err := json.Unmarshal([]byte(jsonText), &parsed)
- require.NoError(t, err)
- require.Len(t, parsed.Triggers, 1, "expected exactly 1 trigger on %s span", span.OperationName())
- ruleID := parsed.Triggers[0].Rule.ID
- _, duplicate := events[ruleID]
- require.False(t, duplicate, "found duplicated hit for rule %s", ruleID)
- var origin string
- switch name := span.OperationName(); name {
- case "graphql.field":
- field := span.Tag(tagGraphqlField).(string)
- origin = fmt.Sprintf("%s(%s)", "graphql.resolve", field)
- case "graphql.request":
- origin = "graphql.execute"
- default:
- require.Fail(t, "rule trigger recorded on unecpected span", "rule %s recorded a hit on unexpected span %s", ruleID, name)
- }
- events[ruleID] = origin
+ jsonText, ok := span.Tag("_dd.appsec.json").(string)
+ require.True(t, ok, "expected _dd.appsec.json tag on span")
+
+ var parsed ddAppsecJSON
+ err = json.Unmarshal([]byte(jsonText), &parsed)
+ require.NoError(t, err)
+
+ ids := make([]string, 0, len(parsed.Triggers))
+ for _, trigger := range parsed.Triggers {
+ ids = append(ids, trigger.Rule.ID)
}
- // Ensure they match the expected outcome
- require.Equal(t, tc.events, events)
+
+ require.ElementsMatch(t, ids, []string{"test-rule-001", "test-rule-002"})
})
}
})
diff --git a/contrib/graph-gophers/graphql-go/graphql.go b/contrib/graph-gophers/graphql-go/graphql.go
index 5e3b2a21eb..040e7dff04 100644
--- a/contrib/graph-gophers/graphql-go/graphql.go
+++ b/contrib/graph-gophers/graphql-go/graphql.go
@@ -20,7 +20,6 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
@@ -71,12 +70,12 @@ func (t *Tracer) TraceQuery(ctx context.Context, queryString, operationName stri
}
span, ctx := ddtracer.StartSpanFromContext(ctx, t.cfg.querySpanName, opts...)
- ctx, request := graphqlsec.StartRequestOperation(ctx, span, types.RequestOperationArgs{
+ ctx, request := graphqlsec.StartRequestOperation(ctx, graphqlsec.RequestOperationArgs{
RawQuery: queryString,
OperationName: operationName,
Variables: variables,
})
- ctx, query := graphqlsec.StartExecutionOperation(ctx, span, types.ExecutionOperationArgs{
+ ctx, query := graphqlsec.StartExecutionOperation(ctx, graphqlsec.ExecutionOperationArgs{
Query: queryString,
OperationName: operationName,
Variables: variables,
@@ -93,8 +92,8 @@ func (t *Tracer) TraceQuery(ctx context.Context, queryString, operationName stri
err = fmt.Errorf("%s (and %d more errors)", errs[0], n-1)
}
defer span.Finish(ddtracer.WithError(err))
- defer request.Finish(types.RequestOperationRes{Error: err})
- query.Finish(types.ExecutionOperationRes{Error: err})
+ defer request.Finish(span, graphqlsec.RequestOperationRes{Error: err})
+ query.Finish(graphqlsec.ExecutionOperationRes{Error: err})
}
}
@@ -120,7 +119,7 @@ func (t *Tracer) TraceField(ctx context.Context, _, typeName, fieldName string,
}
span, ctx := ddtracer.StartSpanFromContext(ctx, "graphql.field", opts...)
- ctx, field := graphqlsec.StartResolveOperation(ctx, span, types.ResolveOperationArgs{
+ ctx, field := graphqlsec.StartResolveOperation(ctx, graphqlsec.ResolveOperationArgs{
TypeName: typeName,
FieldName: fieldName,
Arguments: arguments,
@@ -128,7 +127,7 @@ func (t *Tracer) TraceField(ctx context.Context, _, typeName, fieldName string,
})
return ctx, func(err *errors.QueryError) {
- field.Finish(types.ResolveOperationRes{Error: err})
+ field.Finish(graphqlsec.ResolveOperationRes{Error: err})
// must explicitly check for nil, see issue golang/go#22729
if err != nil {
diff --git a/contrib/graphql-go/graphql/appsec_test.go b/contrib/graphql-go/graphql/appsec_test.go
index fa4a84465f..178d3bae35 100644
--- a/contrib/graphql-go/graphql/appsec_test.go
+++ b/contrib/graphql-go/graphql/appsec_test.go
@@ -92,7 +92,6 @@ func TestAppSec(t *testing.T) {
testCases := map[string]struct {
query string
variables map[string]any
- events map[string]string
}{
"basic": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`,
@@ -100,10 +99,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"with-default-parameter": {
query: fmt.Sprintf(`query TestQuery($topLevelId: String = %#v, $nestedId: String!) { topLevel(id: $topLevelId) { nested(id: $nestedId) } }`, topLevelAttack),
@@ -111,10 +106,6 @@ func TestAppSec(t *testing.T) {
// "topLevelId" omitted (default value used)
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevel)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
"embedded-variable": {
query: `query TestQuery($topLevelId: String!, $nestedId: String!) {
@@ -126,10 +117,6 @@ func TestAppSec(t *testing.T) {
"topLevelId": topLevelAttack,
"nestedId": nestedAttack,
},
- events: map[string]string{
- "test-rule-001": "graphql.resolve(topLevelMapped)",
- "test-rule-002": "graphql.resolve(nested)",
- },
},
}
@@ -151,8 +138,8 @@ func TestAppSec(t *testing.T) {
spans := mt.FinishedSpans()
require.NotEmpty(t, spans)
// The last finished span (which is GraphQL entry) should have the "_dd.appsec.enabled" tag.
- require.Equal(t, 1, spans[len(spans)-1].Tag("_dd.appsec.enabled"))
- events := make(map[string]string)
+ span := spans[len(spans)-1]
+ require.Equal(t, 1, span.Tag("_dd.appsec.enabled"))
type ddAppsecJSON struct {
Triggers []struct {
Rule struct {
@@ -160,34 +147,20 @@ func TestAppSec(t *testing.T) {
} `json:"rule"`
} `json:"triggers"`
}
- // Search for AppSec events in the set of spans
- for _, span := range spans {
- jsonText, ok := span.Tag("_dd.appsec.json").(string)
- if !ok || jsonText == "" {
- continue
- }
- var parsed ddAppsecJSON
- err := json.Unmarshal([]byte(jsonText), &parsed)
- require.NoError(t, err)
- require.Len(t, parsed.Triggers, 1, "expected exactly 1 trigger on %s span", span.OperationName())
- ruleID := parsed.Triggers[0].Rule.ID
- _, duplicate := events[ruleID]
- require.False(t, duplicate, "found duplicated hit for rule %s", ruleID)
- var origin string
- switch name := span.OperationName(); name {
- case spanResolve:
- field := span.Tag(tagGraphqlField).(string)
- origin = fmt.Sprintf("%s(%s)", spanResolve, field)
- case spanExecute:
- origin = spanExecute
- default:
- require.Fail(t, "rule trigger recorded on unecpected span", "rule %s recorded a hit on unexpected span %s", ruleID, name)
- }
- events[ruleID] = origin
+ jsonText, ok := span.Tag("_dd.appsec.json").(string)
+ require.True(t, ok, "expected _dd.appsec.json tag on span")
+
+ var parsed ddAppsecJSON
+ err = json.Unmarshal([]byte(jsonText), &parsed)
+ require.NoError(t, err)
+
+ ids := make([]string, 0, len(parsed.Triggers))
+ for _, trigger := range parsed.Triggers {
+ ids = append(ids, trigger.Rule.ID)
}
- // Ensure they match the expected outcome
- require.Equal(t, tc.events, events)
+
+ require.ElementsMatch(t, ids, []string{"test-rule-001", "test-rule-002"})
})
}
})
diff --git a/contrib/graphql-go/graphql/graphql.go b/contrib/graphql-go/graphql/graphql.go
index 7d348fc07e..70c131d314 100644
--- a/contrib/graphql-go/graphql/graphql.go
+++ b/contrib/graphql-go/graphql/graphql.go
@@ -15,7 +15,6 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
"github.com/graphql-go/graphql"
@@ -64,7 +63,7 @@ type datadogExtension struct{ config }
type contextKey struct{}
type contextData struct {
serverSpan tracer.Span
- requestOp *types.RequestOperation
+ requestOp *graphqlsec.RequestOperation
variables map[string]any
query string
operationName string
@@ -73,7 +72,7 @@ type contextData struct {
// finish closes the top-level request operation, as well as the server span.
func (c *contextData) finish(data any, err error) {
defer c.serverSpan.Finish(tracer.WithError(err))
- c.requestOp.Finish(types.RequestOperationRes{Data: data, Error: err})
+ c.requestOp.Finish(c.serverSpan, graphqlsec.RequestOperationRes{Data: data, Error: err})
}
var extensionName = reflect.TypeOf((*datadogExtension)(nil)).Elem().Name()
@@ -98,7 +97,7 @@ func (i datadogExtension) Init(ctx context.Context, params *graphql.Params) cont
tracer.Tag(ext.Component, componentName),
tracer.Measured(),
)
- ctx, request := graphqlsec.StartRequestOperation(ctx, span, types.RequestOperationArgs{
+ ctx, request := graphqlsec.StartRequestOperation(ctx, graphqlsec.RequestOperationArgs{
RawQuery: params.RequestString,
Variables: params.VariableValues,
OperationName: params.OperationName,
@@ -193,7 +192,7 @@ func (i datadogExtension) ExecutionDidStart(ctx context.Context) (context.Contex
opts = append(opts, tracer.Tag(ext.EventSampleRate, i.config.analyticsRate))
}
span, ctx := tracer.StartSpanFromContext(ctx, spanExecute, opts...)
- ctx, op := graphqlsec.StartExecutionOperation(ctx, span, types.ExecutionOperationArgs{
+ ctx, op := graphqlsec.StartExecutionOperation(ctx, graphqlsec.ExecutionOperationArgs{
Query: data.query,
OperationName: data.operationName,
Variables: data.variables,
@@ -204,7 +203,7 @@ func (i datadogExtension) ExecutionDidStart(ctx context.Context) (context.Contex
defer data.finish(result.Data, err)
span.Finish(tracer.WithError(err))
}()
- op.Finish(types.ExecutionOperationRes{Data: result.Data, Error: err})
+ op.Finish(graphqlsec.ExecutionOperationRes{Data: result.Data, Error: err})
}
}
@@ -240,14 +239,14 @@ func (i datadogExtension) ResolveFieldDidStart(ctx context.Context, info *graphq
opts = append(opts, tracer.Tag(ext.EventSampleRate, i.config.analyticsRate))
}
span, ctx := tracer.StartSpanFromContext(ctx, spanResolve, opts...)
- ctx, op := graphqlsec.StartResolveOperation(ctx, span, types.ResolveOperationArgs{
+ ctx, op := graphqlsec.StartResolveOperation(ctx, graphqlsec.ResolveOperationArgs{
TypeName: info.ParentType.Name(),
FieldName: info.FieldName,
Arguments: collectArguments(info),
})
return ctx, func(result any, err error) {
defer span.Finish(tracer.WithError(err))
- op.Finish(types.ResolveOperationRes{Error: err, Data: result})
+ op.Finish(graphqlsec.ResolveOperationRes{Error: err, Data: result})
}
}
diff --git a/contrib/internal/httptrace/before_handle.go b/contrib/internal/httptrace/before_handle.go
new file mode 100644
index 0000000000..afd40726ed
--- /dev/null
+++ b/contrib/internal/httptrace/before_handle.go
@@ -0,0 +1,78 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+import (
+ "net/http"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/options"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
+)
+
+// ServeConfig specifies the tracing configuration when using TraceAndServe.
+type ServeConfig struct {
+ // Service specifies the service name to use. If left blank, the global service name
+ // will be inherited.
+ Service string
+ // Resource optionally specifies the resource name for this request.
+ Resource string
+ // QueryParams should be true in order to append the URL query values to the "http.url" tag.
+ QueryParams bool
+ // Route is the request's matched route, if any, or is empty otherwise
+ Route string
+ // RouteParams specifies framework-specific route parameters (e.g. for route /user/:id coming
+ // in as /user/123 we'll have {"id": "123"}). This field is optional and is used for monitoring
+ // by AppSec. It is only taken into account when AppSec is enabled.
+ RouteParams map[string]string
+ // FinishOpts specifies any options to be used when finishing the request span.
+ FinishOpts []ddtrace.FinishOption
+ // SpanOpts specifies any options to be applied to the request starting span.
+ SpanOpts []ddtrace.StartSpanOption
+}
+
+// BeforeHandle contains functionality that should be executed before a http.Handler runs.
+// It returns the "traced" http.ResponseWriter and http.Request, an additional afterHandle function
+// that should be executed after the Handler runs, and a handled bool that indicates whether the request has been handled
+// or not - in case it was handled, the original handler should not run.
+func BeforeHandle(cfg *ServeConfig, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request, func(), bool) {
+ if cfg == nil {
+ cfg = new(ServeConfig)
+ }
+ opts := options.Copy(cfg.SpanOpts...) // make a copy of cfg.SpanOpts to avoid races
+ if cfg.Service != "" {
+ opts = append(opts, tracer.ServiceName(cfg.Service))
+ }
+ if cfg.Resource != "" {
+ opts = append(opts, tracer.ResourceName(cfg.Resource))
+ }
+ if cfg.Route != "" {
+ opts = append(opts, tracer.Tag(ext.HTTPRoute, cfg.Route))
+ }
+ span, ctx := StartRequestSpan(r, opts...)
+ rw, ddrw := wrapResponseWriter(w)
+ rt := r.WithContext(ctx)
+
+ closeSpan := func() {
+ FinishRequestSpan(span, ddrw.status, cfg.FinishOpts...)
+ }
+ afterHandle := closeSpan
+ handled := false
+ if appsec.Enabled() {
+ secW, secReq, secAfterHandle, secHandled := httpsec.BeforeHandle(rw, rt, span, cfg.RouteParams, nil)
+ afterHandle = func() {
+ secAfterHandle()
+ closeSpan()
+ }
+ rw = secW
+ rt = secReq
+ handled = secHandled
+ }
+ return rw, rt, afterHandle, handled
+}
diff --git a/contrib/internal/httptrace/config.go b/contrib/internal/httptrace/config.go
index 691a529400..23ee0113d5 100644
--- a/contrib/internal/httptrace/config.go
+++ b/contrib/internal/httptrace/config.go
@@ -8,6 +8,8 @@ package httptrace
import (
"os"
"regexp"
+ "strconv"
+ "strings"
"gopkg.in/DataDog/dd-trace-go.v1/internal"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
@@ -22,6 +24,8 @@ const (
envQueryStringRegexp = "DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP"
// envTraceClientIPEnabled is the name of the env var used to specify whether or not to collect client ip in span tags
envTraceClientIPEnabled = "DD_TRACE_CLIENT_IP_ENABLED"
+ // envServerErrorStatuses is the name of the env var used to specify error status codes on http server spans
+ envServerErrorStatuses = "DD_TRACE_HTTP_SERVER_ERROR_STATUSES"
)
// defaultQueryStringRegexp is the regexp used for query string obfuscation if `envQueryStringRegexp` is empty.
@@ -31,6 +35,7 @@ type config struct {
queryStringRegexp *regexp.Regexp // specifies the regexp to use for query string obfuscation.
queryString bool // reports whether the query string should be included in the URL span tag.
traceClientIP bool
+ isStatusError func(statusCode int) bool
}
func newConfig() config {
@@ -38,6 +43,11 @@ func newConfig() config {
queryString: !internal.BoolEnv(envQueryStringDisabled, false),
queryStringRegexp: defaultQueryStringRegexp,
traceClientIP: internal.BoolEnv(envTraceClientIPEnabled, false),
+ isStatusError: isServerError,
+ }
+ v := os.Getenv(envServerErrorStatuses)
+ if fn := GetErrorCodesFromInput(v); fn != nil {
+ c.isStatusError = fn
}
if s, ok := os.LookupEnv(envQueryStringRegexp); !ok {
return c
@@ -51,3 +61,62 @@ func newConfig() config {
}
return c
}
+
+func isServerError(statusCode int) bool {
+ return statusCode >= 500 && statusCode < 600
+}
+
+// GetErrorCodesFromInput parses a comma-separated string s to determine which codes are to be considered errors
+// Its purpose is to support the DD_TRACE_HTTP_SERVER_ERROR_STATUSES env var
+// If the error condition cannot be determined from s, `nil` is returned
+// e.g., input of "100,200,300-400" returns a function that returns true on 100, 200, and all values between 300-400, inclusive
+// any input that cannot be translated to integer values returns nil
+func GetErrorCodesFromInput(s string) func(statusCode int) bool {
+ if s == "" {
+ return nil
+ }
+ var codes []int
+ var ranges [][]int
+ vals := strings.Split(s, ",")
+ for _, val := range vals {
+ // "-" indicates a range of values
+ if strings.Contains(val, "-") {
+ bounds := strings.Split(val, "-")
+ if len(bounds) != 2 {
+ log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val)
+ return nil
+ }
+ before, err := strconv.Atoi(bounds[0])
+ if err != nil {
+ log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val)
+ return nil
+ }
+ after, err := strconv.Atoi(bounds[1])
+ if err != nil {
+ log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val)
+ return nil
+ }
+ ranges = append(ranges, []int{before, after})
+ } else {
+ intVal, err := strconv.Atoi(val)
+ if err != nil {
+ log.Debug("Trouble parsing %v due to entry %v, using default error status determination logic", s, val)
+ return nil
+ }
+ codes = append(codes, intVal)
+ }
+ }
+ return func(statusCode int) bool {
+ for _, c := range codes {
+ if c == statusCode {
+ return true
+ }
+ }
+ for _, bounds := range ranges {
+ if statusCode >= bounds[0] && statusCode <= bounds[1] {
+ return true
+ }
+ }
+ return false
+ }
+}
diff --git a/contrib/internal/httptrace/httptrace.go b/contrib/internal/httptrace/httptrace.go
index 6c92fb80ac..6fa5a43242 100644
--- a/contrib/internal/httptrace/httptrace.go
+++ b/contrib/internal/httptrace/httptrace.go
@@ -18,7 +18,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
)
@@ -34,7 +34,7 @@ func StartRequestSpan(r *http.Request, opts ...ddtrace.StartSpanOption) (tracer.
var ipTags map[string]string
if cfg.traceClientIP {
- ipTags, _ = httptrace.ClientIPTags(r.Header, true, r.RemoteAddr)
+ ipTags, _ = httpsec.ClientIPTags(r.Header, true, r.RemoteAddr)
}
nopts := make([]ddtrace.StartSpanOption, 0, len(opts)+1+len(ipTags))
nopts = append(nopts,
@@ -65,15 +65,21 @@ func StartRequestSpan(r *http.Request, opts ...ddtrace.StartSpanOption) (tracer.
// code. Any further span finish option can be added with opts.
func FinishRequestSpan(s tracer.Span, status int, opts ...tracer.FinishOption) {
var statusStr string
+ // if status is 0, treat it like 200 unless 0 was called out in DD_TRACE_HTTP_SERVER_ERROR_STATUSES
if status == 0 {
- statusStr = "200"
+ if cfg.isStatusError(status) {
+ statusStr = "0"
+ s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status)))
+ } else {
+ statusStr = "200"
+ }
} else {
statusStr = strconv.Itoa(status)
+ if cfg.isStatusError(status) {
+ s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status)))
+ }
}
s.SetTag(ext.HTTPCode, statusStr)
- if status >= 500 && status < 600 {
- s.SetTag(ext.Error, fmt.Errorf("%s: %s", statusStr, http.StatusText(status)))
- }
s.Finish(opts...)
}
diff --git a/contrib/internal/httptrace/httptrace_test.go b/contrib/internal/httptrace/httptrace_test.go
index c037eebeb7..8733a0a343 100644
--- a/contrib/internal/httptrace/httptrace_test.go
+++ b/contrib/internal/httptrace/httptrace_test.go
@@ -6,9 +6,11 @@
package httptrace
import (
+ "fmt"
"net/http"
"net/http/httptest"
"net/url"
+ "os"
"strconv"
"testing"
@@ -25,6 +27,117 @@ import (
"github.com/stretchr/testify/require"
)
+func TestGetErrorCodesFromInput(t *testing.T) {
+ codesOnly := "400,401,402"
+ rangesOnly := "400-405,408-410"
+ mixed := "400,403-405,407-410,412"
+ invalid1 := "1,100-200-300-"
+ invalid2 := "abc:@3$5^,"
+ empty := ""
+ t.Run("codesOnly", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(codesOnly)
+ for i := 400; i <= 402; i++ {
+ assert.True(t, fn(i))
+ }
+ assert.False(t, fn(500))
+ assert.False(t, fn(0))
+ })
+ t.Run("rangesOnly", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(rangesOnly)
+ for i := 400; i <= 405; i++ {
+ assert.True(t, fn(i))
+ }
+ for i := 408; i <= 410; i++ {
+ assert.True(t, fn(i))
+ }
+ assert.False(t, fn(406))
+ assert.False(t, fn(411))
+ assert.False(t, fn(500))
+ })
+ t.Run("mixed", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(mixed)
+ assert.True(t, fn(400))
+ assert.False(t, fn(401))
+ for i := 403; i <= 405; i++ {
+ assert.True(t, fn(i))
+ }
+ assert.False(t, fn(406))
+ for i := 407; i <= 410; i++ {
+ assert.True(t, fn(i))
+ }
+ assert.False(t, fn(411))
+ assert.False(t, fn(500))
+ })
+ // invalid entries below should result in nils
+ t.Run("invalid1", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(invalid1)
+ assert.Nil(t, fn)
+ })
+ t.Run("invalid2", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(invalid2)
+ assert.Nil(t, fn)
+ })
+ t.Run("empty", func(t *testing.T) {
+ fn := GetErrorCodesFromInput(empty)
+ assert.Nil(t, fn)
+ })
+}
+
+func TestConfiguredErrorStatuses(t *testing.T) {
+ defer os.Unsetenv("DD_TRACE_HTTP_SERVER_ERROR_STATUSES")
+ t.Run("configured", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ os.Setenv("DD_TRACE_HTTP_SERVER_ERROR_STATUSES", "199-399,400,501")
+
+ // reset config based on new DD_TRACE_HTTP_SERVER_ERROR_STATUSES value
+ oldConfig := cfg
+ defer func() { cfg = oldConfig }()
+ cfg = newConfig()
+
+ statuses := []int{0, 200, 400, 500}
+ r := httptest.NewRequest(http.MethodGet, "/test", nil)
+ for i, status := range statuses {
+ sp, _ := StartRequestSpan(r)
+ FinishRequestSpan(sp, status)
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, i+1)
+
+ switch status {
+ case 0:
+ assert.Equal(t, "200", spans[i].Tag(ext.HTTPCode))
+ assert.Nil(t, spans[i].Tag(ext.Error))
+ case 200, 400:
+ assert.Equal(t, strconv.Itoa(status), spans[i].Tag(ext.HTTPCode))
+ assert.Equal(t, fmt.Errorf("%s: %s", strconv.Itoa(status), http.StatusText(status)), spans[i].Tag(ext.Error).(error))
+ case 500:
+ assert.Equal(t, strconv.Itoa(status), spans[i].Tag(ext.HTTPCode))
+ assert.Nil(t, spans[i].Tag(ext.Error))
+ }
+ }
+ })
+ t.Run("zero", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ os.Setenv("DD_TRACE_HTTP_SERVER_ERROR_STATUSES", "0")
+
+ // reset config based on new DD_TRACE_HTTP_SERVER_ERROR_STATUSES value
+ oldConfig := cfg
+ defer func() { cfg = oldConfig }()
+ cfg = newConfig()
+
+ r := httptest.NewRequest(http.MethodGet, "/test", nil)
+ sp, _ := StartRequestSpan(r)
+ FinishRequestSpan(sp, 0)
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+ assert.Equal(t, "0", spans[0].Tag(ext.HTTPCode))
+ assert.Equal(t, fmt.Errorf("0: %s", http.StatusText(0)), spans[0].Tag(ext.Error).(error))
+ })
+}
+
func TestHeaderTagsFromRequest(t *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
diff --git a/contrib/internal/httptrace/make_responsewriter.go b/contrib/internal/httptrace/make_responsewriter.go
new file mode 100644
index 0000000000..da08b62d99
--- /dev/null
+++ b/contrib/internal/httptrace/make_responsewriter.go
@@ -0,0 +1,88 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+//go:build ignore
+// +build ignore
+
+// This program generates wrapper implementations of http.ResponseWriter that
+// also satisfy http.Flusher, http.Pusher, http.CloseNotifier and http.Hijacker,
+// based on whether or not the passed in http.ResponseWriter also satisfies
+// them.
+
+package main
+
+import (
+ "os"
+ "text/template"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists"
+)
+
+func main() {
+ interfaces := []string{"Flusher", "Pusher", "CloseNotifier", "Hijacker"}
+ var combos [][][]string
+ for pick := len(interfaces); pick > 0; pick-- {
+ combos = append(combos, lists.Combinations(interfaces, pick))
+ }
+ template.Must(template.New("").Parse(tpl)).Execute(os.Stdout, map[string]interface{}{
+ "Interfaces": interfaces,
+ "Combinations": combos,
+ })
+}
+
+var tpl = `// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+// Code generated by make_responsewriter.go DO NOT EDIT
+
+package httptrace
+
+import "net/http"
+
+
+// wrapResponseWriter wraps an underlying http.ResponseWriter so that it can
+// trace the http response codes. It also checks for various http interfaces
+// (Flusher, Pusher, CloseNotifier, Hijacker) and if the underlying
+// http.ResponseWriter implements them it generates an unnamed struct with the
+// appropriate fields.
+//
+// This code is generated because we have to account for all the permutations
+// of the interfaces.
+//
+// In case of any new interfaces or methods we didn't consider here, we also
+// implement the rwUnwrapper interface, which is used internally by
+// the standard library: https://github.com/golang/go/blob/6d89b38ed86e0bfa0ddaba08dc4071e6bb300eea/src/net/http/responsecontroller.go#L42-L44
+func wrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *responseWriter) {
+{{- range .Interfaces }}
+ h{{.}}, ok{{.}} := w.(http.{{.}})
+{{- end }}
+
+ mw := newResponseWriter(w)
+ type monitoredResponseWriter interface {
+ http.ResponseWriter
+ Status() int
+ Unwrap() http.ResponseWriter
+ }
+ switch {
+{{- range .Combinations }}
+ {{- range . }}
+ case {{ range $i, $v := . }}{{ if gt $i 0 }} && {{ end }}ok{{ $v }}{{ end }}:
+ w = struct {
+ monitoredResponseWriter
+ {{- range . }}
+ http.{{.}}
+ {{- end }}
+ }{mw{{ range . }}, h{{.}}{{ end }}}
+ {{- end }}
+{{- end }}
+ default:
+ w = mw
+ }
+
+ return w, mw
+}
+`
diff --git a/contrib/internal/httptrace/response_writer.go b/contrib/internal/httptrace/response_writer.go
new file mode 100644
index 0000000000..2bbc31bad7
--- /dev/null
+++ b/contrib/internal/httptrace/response_writer.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+//go:generate sh -c "go run make_responsewriter.go | gofmt > trace_gen.go"
+
+import "net/http"
+
+// responseWriter is a small wrapper around an http response writer that will
+// intercept and store the status of a request.
+type responseWriter struct {
+ http.ResponseWriter
+ status int
+}
+
+func newResponseWriter(w http.ResponseWriter) *responseWriter {
+ return &responseWriter{w, 0}
+}
+
+// Status returns the status code that was monitored.
+func (w *responseWriter) Status() int {
+ return w.status
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// We explicitly call WriteHeader with the 200 status code
+// in order to get it reported into the span.
+func (w *responseWriter) Write(b []byte) (int, error) {
+ if w.status == 0 {
+ w.WriteHeader(http.StatusOK)
+ }
+ return w.ResponseWriter.Write(b)
+}
+
+// WriteHeader sends an HTTP response header with status code.
+// It also sets the status code to the span.
+func (w *responseWriter) WriteHeader(status int) {
+ if w.status != 0 {
+ return
+ }
+ w.ResponseWriter.WriteHeader(status)
+ w.status = status
+}
+
+// Unwrap returns the underlying wrapped http.ResponseWriter.
+func (w *responseWriter) Unwrap() http.ResponseWriter {
+ return w.ResponseWriter
+}
diff --git a/contrib/internal/httptrace/response_writer_test.go b/contrib/internal/httptrace/response_writer_test.go
new file mode 100644
index 0000000000..78d5ffc6e2
--- /dev/null
+++ b/contrib/internal/httptrace/response_writer_test.go
@@ -0,0 +1,36 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httptrace
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_wrapResponseWriter(t *testing.T) {
+ // there doesn't appear to be an easy way to test http.Pusher support via an http request
+ // so we'll just confirm wrapResponseWriter preserves it
+ t.Run("Pusher", func(t *testing.T) {
+ var i struct {
+ http.ResponseWriter
+ http.Pusher
+ }
+ var w http.ResponseWriter = i
+ _, ok := w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+
+ w, _ = wrapResponseWriter(w)
+ _, ok = w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+ })
+
+}
diff --git a/contrib/net/http/trace_gen.go b/contrib/internal/httptrace/trace_gen.go
similarity index 99%
rename from contrib/net/http/trace_gen.go
rename to contrib/internal/httptrace/trace_gen.go
index db04144454..24e261838e 100644
--- a/contrib/net/http/trace_gen.go
+++ b/contrib/internal/httptrace/trace_gen.go
@@ -5,7 +5,7 @@
// Code generated by make_responsewriter.go DO NOT EDIT
-package http
+package httptrace
import "net/http"
diff --git a/contrib/internal/telemetrytest/telemetry_test.go b/contrib/internal/telemetrytest/telemetry_test.go
index 9dd22278f5..a203228fc2 100644
--- a/contrib/internal/telemetrytest/telemetry_test.go
+++ b/contrib/internal/telemetrytest/telemetry_test.go
@@ -6,6 +6,7 @@ package telemetrytest
import (
"encoding/json"
+ "os"
"os/exec"
"strings"
"testing"
@@ -39,36 +40,53 @@ type contribPkg struct {
var TelemetryImport = "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
-func (p *contribPkg) hasTelemetryImport() bool {
+func readPackage(t *testing.T, path string) contribPkg {
+ cmd := exec.Command("go", "list", "-json", path)
+ cmd.Stderr = os.Stderr
+ output, err := cmd.Output()
+ require.NoError(t, err)
+ p := contribPkg{}
+ err = json.Unmarshal(output, &p)
+ require.NoError(t, err)
+ return p
+}
+
+func (p *contribPkg) hasTelemetryImport(t *testing.T) bool {
for _, imp := range p.Imports {
if imp == TelemetryImport {
return true
}
}
+ // if we didn't find it imported directly, it might be imported in one of sub-package imports
+ for _, imp := range p.Imports {
+ if strings.HasPrefix(imp, p.ImportPath) {
+ p := readPackage(t, imp)
+ if p.hasTelemetryImport(t) {
+ return true
+ }
+ }
+ }
return false
}
// TestTelemetryEnabled verifies that the expected contrib packages leverage instrumentation telemetry
func TestTelemetryEnabled(t *testing.T) {
body, err := exec.Command("go", "list", "-json", "../../...").Output()
- if err != nil {
- t.Fatalf(err.Error())
- }
+ require.NoError(t, err)
+
var packages []contribPkg
stream := json.NewDecoder(strings.NewReader(string(body)))
for stream.More() {
var out contribPkg
err := stream.Decode(&out)
- if err != nil {
- t.Fatalf(err.Error())
- }
+ require.NoError(t, err)
packages = append(packages, out)
}
for _, pkg := range packages {
if strings.Contains(pkg.ImportPath, "/test") || strings.Contains(pkg.ImportPath, "/internal") {
continue
}
- if !pkg.hasTelemetryImport() {
+ if !pkg.hasTelemetryImport(t) {
t.Fatalf(`package %q is expected use instrumentation telemetry. For more info see https://github.com/DataDog/dd-trace-go/blob/main/contrib/README.md#instrumentation-telemetry`, pkg.ImportPath)
}
}
diff --git a/contrib/jackc/pgx.v5/pgx.go b/contrib/jackc/pgx.v5/pgx.go
index a940d62168..3150d2eb7e 100644
--- a/contrib/jackc/pgx.v5/pgx.go
+++ b/contrib/jackc/pgx.v5/pgx.go
@@ -23,22 +23,33 @@ func init() {
tracer.MarkIntegrationImported("github.com/jackc/pgx.v5")
}
+// Deprecated: this type is unused internally so it will be removed in a future release, please use pgx.Batch instead.
type Batch = pgx.Batch
+// Connect is equivalent to pgx.Connect providing a connection augmented with tracing.
func Connect(ctx context.Context, connString string, opts ...Option) (*pgx.Conn, error) {
connConfig, err := pgx.ParseConfig(connString)
if err != nil {
return nil, err
}
-
return ConnectConfig(ctx, connConfig, opts...)
}
+// ConnectConfig is equivalent to pgx.ConnectConfig providing a connection augmented with tracing.
func ConnectConfig(ctx context.Context, connConfig *pgx.ConnConfig, opts ...Option) (*pgx.Conn, error) {
// The tracer must be set in the config before calling connect
// as pgx takes ownership of the config. QueryTracer traces
// may work, but none of the others will, as they're set in
// unexported fields in the config in the pgx.connect function.
- connConfig.Tracer = newPgxTracer(opts...)
+ connConfig.Tracer = wrapPgxTracer(connConfig.Tracer, opts...)
return pgx.ConnectConfig(ctx, connConfig)
}
+
+// ConnectWithOptions is equivalent to pgx.ConnectWithOptions providing a connection augmented with tracing.
+func ConnectWithOptions(ctx context.Context, connString string, options pgx.ParseConfigOptions, tracerOpts ...Option) (*pgx.Conn, error) {
+ connConfig, err := pgx.ParseConfigWithOptions(connString, options)
+ if err != nil {
+ return nil, err
+ }
+ return ConnectConfig(ctx, connConfig, tracerOpts...)
+}
diff --git a/contrib/jackc/pgx.v5/pgx_tracer.go b/contrib/jackc/pgx.v5/pgx_tracer.go
index f361a3ac48..26e053601e 100644
--- a/contrib/jackc/pgx.v5/pgx_tracer.go
+++ b/contrib/jackc/pgx.v5/pgx_tracer.go
@@ -44,42 +44,82 @@ func (tb *tracedBatchQuery) finish() {
tb.span.Finish(tracer.WithError(tb.data.Err))
}
+type allPgxTracers interface {
+ pgx.QueryTracer
+ pgx.BatchTracer
+ pgx.ConnectTracer
+ pgx.PrepareTracer
+ pgx.CopyFromTracer
+ pgxpool.AcquireTracer
+}
+
+type wrappedPgxTracer struct {
+ query pgx.QueryTracer
+ batch pgx.BatchTracer
+ connect pgx.ConnectTracer
+ prepare pgx.PrepareTracer
+ copyFrom pgx.CopyFromTracer
+ poolAcquire pgxpool.AcquireTracer
+}
+
type pgxTracer struct {
cfg *config
prevBatchQuery *tracedBatchQuery
+ wrapped wrappedPgxTracer
}
var (
- _ pgx.QueryTracer = (*pgxTracer)(nil)
- _ pgx.BatchTracer = (*pgxTracer)(nil)
- _ pgx.ConnectTracer = (*pgxTracer)(nil)
- _ pgx.PrepareTracer = (*pgxTracer)(nil)
- _ pgx.CopyFromTracer = (*pgxTracer)(nil)
- _ pgxpool.AcquireTracer = (*pgxTracer)(nil)
+ _ allPgxTracers = (*pgxTracer)(nil)
)
-func newPgxTracer(opts ...Option) *pgxTracer {
+func wrapPgxTracer(prev pgx.QueryTracer, opts ...Option) *pgxTracer {
cfg := defaultConfig()
for _, opt := range opts {
opt(cfg)
}
cfg.checkStatsdRequired()
- return &pgxTracer{cfg: cfg}
+ tr := &pgxTracer{cfg: cfg}
+ if prev != nil {
+ tr.wrapped.query = prev
+ if batchTr, ok := prev.(pgx.BatchTracer); ok {
+ tr.wrapped.batch = batchTr
+ }
+ if connTr, ok := prev.(pgx.ConnectTracer); ok {
+ tr.wrapped.connect = connTr
+ }
+ if prepareTr, ok := prev.(pgx.PrepareTracer); ok {
+ tr.wrapped.prepare = prepareTr
+ }
+ if copyFromTr, ok := prev.(pgx.CopyFromTracer); ok {
+ tr.wrapped.copyFrom = copyFromTr
+ }
+ if poolAcquireTr, ok := prev.(pgxpool.AcquireTracer); ok {
+ tr.wrapped.poolAcquire = poolAcquireTr
+ }
+ }
+
+ return tr
}
func (t *pgxTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
if !t.cfg.traceQuery {
return ctx
}
+ if t.wrapped.query != nil {
+ ctx = t.wrapped.query.TraceQueryStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeQuery, data.SQL)
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.query", opts...)
return ctx
}
-func (t *pgxTracer) TraceQueryEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceQueryEndData) {
+func (t *pgxTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
if !t.cfg.traceQuery {
return
}
+ if t.wrapped.query != nil {
+ t.wrapped.query.TraceQueryEnd(ctx, conn, data)
+ }
span, ok := tracer.SpanFromContext(ctx)
if ok {
span.SetTag(tagRowsAffected, data.CommandTag.RowsAffected())
@@ -91,6 +131,9 @@ func (t *pgxTracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pg
if !t.cfg.traceBatch {
return ctx
}
+ if t.wrapped.batch != nil {
+ ctx = t.wrapped.batch.TraceBatchStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeBatch, "",
tracer.Tag(tagBatchNumQueries, data.Batch.Len()),
)
@@ -102,6 +145,9 @@ func (t *pgxTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pg
if !t.cfg.traceBatch {
return
}
+ if t.wrapped.batch != nil {
+ t.wrapped.batch.TraceBatchQuery(ctx, conn, data)
+ }
// Finish the previous batch query span before starting the next one, since pgx doesn't provide hooks or timestamp
// information about when the actual operation started or finished.
// pgx.Batch* types don't support concurrency. This function doesn't support it either.
@@ -118,10 +164,13 @@ func (t *pgxTracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pg
}
}
-func (t *pgxTracer) TraceBatchEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceBatchEndData) {
+func (t *pgxTracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
if !t.cfg.traceBatch {
return
}
+ if t.wrapped.batch != nil {
+ t.wrapped.batch.TraceBatchEnd(ctx, conn, data)
+ }
if t.prevBatchQuery != nil {
t.prevBatchQuery.finish()
t.prevBatchQuery = nil
@@ -133,6 +182,9 @@ func (t *pgxTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data
if !t.cfg.traceCopyFrom {
return ctx
}
+ if t.wrapped.copyFrom != nil {
+ ctx = t.wrapped.copyFrom.TraceCopyFromStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypeCopyFrom, "",
tracer.Tag(tagCopyFromTables, data.TableName),
tracer.Tag(tagCopyFromColumns, data.ColumnNames),
@@ -141,10 +193,13 @@ func (t *pgxTracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data
return ctx
}
-func (t *pgxTracer) TraceCopyFromEnd(ctx context.Context, _ *pgx.Conn, data pgx.TraceCopyFromEndData) {
+func (t *pgxTracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
if !t.cfg.traceCopyFrom {
return
}
+ if t.wrapped.copyFrom != nil {
+ t.wrapped.copyFrom.TraceCopyFromEnd(ctx, conn, data)
+ }
finishSpan(ctx, data.Err)
}
@@ -152,15 +207,21 @@ func (t *pgxTracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data
if !t.cfg.tracePrepare {
return ctx
}
+ if t.wrapped.prepare != nil {
+ ctx = t.wrapped.prepare.TracePrepareStart(ctx, conn, data)
+ }
opts := t.spanOptions(conn.Config(), operationTypePrepare, data.SQL)
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.prepare", opts...)
return ctx
}
-func (t *pgxTracer) TracePrepareEnd(ctx context.Context, _ *pgx.Conn, data pgx.TracePrepareEndData) {
+func (t *pgxTracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
if !t.cfg.tracePrepare {
return
}
+ if t.wrapped.prepare != nil {
+ t.wrapped.prepare.TracePrepareEnd(ctx, conn, data)
+ }
finishSpan(ctx, data.Err)
}
@@ -168,6 +229,9 @@ func (t *pgxTracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnect
if !t.cfg.traceConnect {
return ctx
}
+ if t.wrapped.connect != nil {
+ ctx = t.wrapped.connect.TraceConnectStart(ctx, data)
+ }
opts := t.spanOptions(data.ConnConfig, operationTypeConnect, "")
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.connect", opts...)
return ctx
@@ -177,23 +241,31 @@ func (t *pgxTracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEn
if !t.cfg.traceConnect {
return
}
+ if t.wrapped.connect != nil {
+ t.wrapped.connect.TraceConnectEnd(ctx, data)
+ }
finishSpan(ctx, data.Err)
}
-func (t *pgxTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
+func (t *pgxTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
if !t.cfg.traceAcquire {
return ctx
}
+ if t.wrapped.poolAcquire != nil {
+ ctx = t.wrapped.poolAcquire.TraceAcquireStart(ctx, pool, data)
+ }
opts := t.spanOptions(pool.Config().ConnConfig, operationTypeAcquire, "")
_, ctx = tracer.StartSpanFromContext(ctx, "pgx.pool.acquire", opts...)
return ctx
}
-func (t *pgxTracer) TraceAcquireEnd(ctx context.Context, _ *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
+func (t *pgxTracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
if !t.cfg.traceAcquire {
return
}
-
+ if t.wrapped.poolAcquire != nil {
+ t.wrapped.poolAcquire.TraceAcquireEnd(ctx, pool, data)
+ }
finishSpan(ctx, data.Err)
}
diff --git a/contrib/jackc/pgx.v5/pgx_tracer_test.go b/contrib/jackc/pgx.v5/pgx_tracer_test.go
index b31b7f3aed..38867a1fe6 100644
--- a/contrib/jackc/pgx.v5/pgx_tracer_test.go
+++ b/contrib/jackc/pgx.v5/pgx_tracer_test.go
@@ -8,7 +8,6 @@ package pgx
import (
"context"
"fmt"
-
"log"
"os"
"testing"
@@ -19,6 +18,8 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/pgconn"
+ "github.com/jackc/pgx/v5/pgxpool"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -69,26 +70,56 @@ func TestMain(m *testing.M) {
}
func TestConnect(t *testing.T) {
- mt := mocktracer.Start()
- defer mt.Stop()
-
- opts := append(tracingAllDisabled(), WithTraceConnect(true))
- runAllOperations(t, opts...)
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 2)
-
- ps := spans[1]
- assert.Equal(t, "parent", ps.OperationName())
- assert.Equal(t, "parent", ps.Tag(ext.ResourceName))
-
- s := spans[0]
- assertCommonTags(t, s)
- assert.Equal(t, "pgx.connect", s.OperationName())
- assert.Equal(t, "Connect", s.Tag(ext.ResourceName))
- assert.Equal(t, "Connect", s.Tag("db.operation"))
- assert.Equal(t, nil, s.Tag(ext.DBStatement))
- assert.Equal(t, ps.SpanID(), s.ParentID())
+ testCases := []struct {
+ name string
+ newConnCreator func(t *testing.T, prev *pgxMockTracer) createConnFn
+ }{
+ {
+ name: "pool",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newPoolCreator(nil, opts...)
+ },
+ },
+ {
+ name: "conn",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newConnCreator(nil, nil, opts...)
+ },
+ },
+ {
+ name: "conn_with_options",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ return newConnCreator(nil, &pgx.ParseConfigOptions{}, opts...)
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ opts := append(tracingAllDisabled(), WithTraceConnect(true))
+ runAllOperations(t, newPoolCreator(nil, opts...))
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 2)
+
+ ps := spans[1]
+ assert.Equal(t, "parent", ps.OperationName())
+ assert.Equal(t, "parent", ps.Tag(ext.ResourceName))
+
+ s := spans[0]
+ assertCommonTags(t, s)
+ assert.Equal(t, "pgx.connect", s.OperationName())
+ assert.Equal(t, "Connect", s.Tag(ext.ResourceName))
+ assert.Equal(t, "Connect", s.Tag("db.operation"))
+ assert.Equal(t, nil, s.Tag(ext.DBStatement))
+ assert.Equal(t, ps.SpanID(), s.ParentID())
+ })
+ }
}
func TestQuery(t *testing.T) {
@@ -96,7 +127,7 @@ func TestQuery(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceQuery(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 3)
@@ -129,7 +160,7 @@ func TestPrepare(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTracePrepare(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 3)
@@ -162,7 +193,7 @@ func TestBatch(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceBatch(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 5)
@@ -211,7 +242,7 @@ func TestCopyFrom(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceCopyFrom(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 2)
@@ -236,7 +267,7 @@ func TestAcquire(t *testing.T) {
defer mt.Stop()
opts := append(tracingAllDisabled(), WithTraceAcquire(true))
- runAllOperations(t, opts...)
+ runAllOperations(t, newPoolCreator(nil, opts...))
spans := mt.FinishedSpans()
require.Len(t, spans, 5)
@@ -254,6 +285,54 @@ func TestAcquire(t *testing.T) {
assert.Equal(t, ps.SpanID(), s.ParentID())
}
+// https://github.com/DataDog/dd-trace-go/issues/2908
+func TestWrapTracer(t *testing.T) {
+ testCases := []struct {
+ name string
+ newConnCreator func(t *testing.T, prev *pgxMockTracer) createConnFn
+ wantSpans int
+ wantHooks int
+ }{
+ {
+ name: "pool",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ cfg, err := pgxpool.ParseConfig(postgresDSN)
+ require.NoError(t, err)
+ cfg.ConnConfig.Tracer = prev
+ return newPoolCreator(cfg)
+ },
+ wantSpans: 15,
+ wantHooks: 13,
+ },
+ {
+ name: "conn",
+ newConnCreator: func(t *testing.T, prev *pgxMockTracer) createConnFn {
+ cfg, err := pgx.ParseConfig(postgresDSN)
+ require.NoError(t, err)
+ cfg.Tracer = prev
+ return newConnCreator(cfg, nil)
+ },
+ wantSpans: 11,
+ wantHooks: 11, // 13 - 2 pool tracer hooks
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ prevTracer := &pgxMockTracer{
+ called: make(map[string]bool),
+ }
+ runAllOperations(t, tc.newConnCreator(t, prevTracer))
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, tc.wantSpans)
+ assert.Len(t, prevTracer.called, tc.wantHooks, "some hook(s) on the previous tracer were not called")
+ })
+ }
+}
+
func tracingAllDisabled() []Option {
return []Option{
WithTraceConnect(false),
@@ -265,18 +344,65 @@ func tracingAllDisabled() []Option {
}
}
-func runAllOperations(t *testing.T, opts ...Option) {
+type pgxConn interface {
+ QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
+ SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults
+ Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error)
+ CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error)
+}
+
+type createConnFn func(t *testing.T, ctx context.Context) pgxConn
+
+func newPoolCreator(cfg *pgxpool.Config, opts ...Option) createConnFn {
+ return func(t *testing.T, ctx context.Context) pgxConn {
+ var (
+ pool *pgxpool.Pool
+ err error
+ )
+ if cfg == nil {
+ pool, err = NewPool(ctx, postgresDSN, opts...)
+ } else {
+ pool, err = NewPoolWithConfig(ctx, cfg, opts...)
+ }
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ pool.Close()
+ })
+ return pool
+ }
+}
+
+func newConnCreator(cfg *pgx.ConnConfig, connOpts *pgx.ParseConfigOptions, opts ...Option) createConnFn {
+ return func(t *testing.T, ctx context.Context) pgxConn {
+ var (
+ conn *pgx.Conn
+ err error
+ )
+ if cfg != nil {
+ conn, err = ConnectConfig(ctx, cfg, opts...)
+ } else if connOpts != nil {
+ conn, err = ConnectWithOptions(ctx, postgresDSN, *connOpts, opts...)
+ } else {
+ conn, err = Connect(ctx, postgresDSN, opts...)
+ }
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ assert.NoError(t, conn.Close(ctx))
+ })
+ return conn
+ }
+}
+
+func runAllOperations(t *testing.T, createConn createConnFn) {
parent, ctx := tracer.StartSpanFromContext(context.Background(), "parent")
defer parent.Finish()
// Connect
- conn, err := NewPool(ctx, postgresDSN, opts...)
- require.NoError(t, err)
- defer conn.Close()
+ conn := createConn(t, ctx)
// Query
var x int
- err = conn.QueryRow(ctx, `SELECT 1`).Scan(&x)
+ err := conn.QueryRow(ctx, `SELECT 1`).Scan(&x)
require.NoError(t, err)
require.Equal(t, 1, x)
@@ -327,3 +453,69 @@ func assertCommonTags(t *testing.T, s mocktracer.Span) {
assert.Equal(t, "postgres", s.Tag(ext.DBName))
assert.Equal(t, "postgres", s.Tag(ext.DBUser))
}
+
+type pgxMockTracer struct {
+ called map[string]bool
+}
+
+var (
+ _ allPgxTracers = (*pgxMockTracer)(nil)
+)
+
+func (p *pgxMockTracer) TraceQueryStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceQueryStartData) context.Context {
+ p.called["query.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceQueryEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceQueryEndData) {
+ p.called["query.end"] = true
+}
+
+func (p *pgxMockTracer) TraceBatchStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceBatchStartData) context.Context {
+ p.called["batch.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceBatchQuery(_ context.Context, _ *pgx.Conn, _ pgx.TraceBatchQueryData) {
+ p.called["batch.query"] = true
+}
+
+func (p *pgxMockTracer) TraceBatchEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceBatchEndData) {
+ p.called["batch.end"] = true
+}
+
+func (p *pgxMockTracer) TraceConnectStart(ctx context.Context, _ pgx.TraceConnectStartData) context.Context {
+ p.called["connect.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceConnectEnd(_ context.Context, _ pgx.TraceConnectEndData) {
+ p.called["connect.end"] = true
+}
+
+func (p *pgxMockTracer) TracePrepareStart(ctx context.Context, _ *pgx.Conn, _ pgx.TracePrepareStartData) context.Context {
+ p.called["prepare.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TracePrepareEnd(_ context.Context, _ *pgx.Conn, _ pgx.TracePrepareEndData) {
+ p.called["prepare.end"] = true
+}
+
+func (p *pgxMockTracer) TraceCopyFromStart(ctx context.Context, _ *pgx.Conn, _ pgx.TraceCopyFromStartData) context.Context {
+ p.called["copyfrom.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceCopyFromEnd(_ context.Context, _ *pgx.Conn, _ pgx.TraceCopyFromEndData) {
+ p.called["copyfrom.end"] = true
+}
+
+func (p *pgxMockTracer) TraceAcquireStart(ctx context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireStartData) context.Context {
+ p.called["pool.acquire.start"] = true
+ return ctx
+}
+
+func (p *pgxMockTracer) TraceAcquireEnd(_ context.Context, _ *pgxpool.Pool, _ pgxpool.TraceAcquireEndData) {
+ p.called["pool.acquire.end"] = true
+}
diff --git a/contrib/jackc/pgx.v5/pgxpool.go b/contrib/jackc/pgx.v5/pgxpool.go
index f4a8cb7ed0..11d93b0859 100644
--- a/contrib/jackc/pgx.v5/pgxpool.go
+++ b/contrib/jackc/pgx.v5/pgxpool.go
@@ -20,7 +20,10 @@ func NewPool(ctx context.Context, connString string, opts ...Option) (*pgxpool.P
}
func NewPoolWithConfig(ctx context.Context, config *pgxpool.Config, opts ...Option) (*pgxpool.Pool, error) {
- tracer := newPgxTracer(opts...)
+ // pgxpool.NewWithConfig panics if the config was not created using pgxpool.ParseConfig, which should ensure everything
+ // is properly initialized, so it doesn't make sense to check for a nil config here.
+
+ tracer := wrapPgxTracer(config.ConnConfig.Tracer, opts...)
config.ConnConfig.Tracer = tracer
pool, err := pgxpool.NewWithConfig(ctx, config)
if err != nil {
diff --git a/contrib/julienschmidt/httprouter/httprouter.go b/contrib/julienschmidt/httprouter/httprouter.go
index 12147a213f..cffaa28876 100644
--- a/contrib/julienschmidt/httprouter/httprouter.go
+++ b/contrib/julienschmidt/httprouter/httprouter.go
@@ -7,68 +7,66 @@
package httprouter // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter"
import (
- "math"
"net/http"
- "strings"
-
- httptraceinternal "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace"
- "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/options"
- httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
"github.com/julienschmidt/httprouter"
-)
-
-const componentName = "julienschmidt/httprouter"
-func init() {
- telemetry.LoadIntegration(componentName)
- tracer.MarkIntegrationImported("github.com/julienschmidt/httprouter")
-}
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/internal/tracing"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
// Router is a traced version of httprouter.Router.
type Router struct {
*httprouter.Router
- config *routerConfig
+ config *tracing.Config
}
// New returns a new router augmented with tracing.
func New(opts ...RouterOption) *Router {
- cfg := new(routerConfig)
- defaults(cfg)
- for _, fn := range opts {
- fn(cfg)
- }
- if !math.IsNaN(cfg.analyticsRate) {
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate))
- }
-
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer))
- cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.Component, componentName))
-
+ cfg := tracing.NewConfig(opts...)
log.Debug("contrib/julienschmidt/httprouter: Configuring Router: %#v", cfg)
return &Router{httprouter.New(), cfg}
}
// ServeHTTP implements http.Handler.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- // get the resource associated to this request
- route := req.URL.Path
- _, ps, _ := r.Router.Lookup(req.Method, route)
- for _, param := range ps {
- route = strings.Replace(route, param.Value, ":"+param.Key, 1)
+ tw, treq, afterHandle, handled := tracing.BeforeHandle(r.config, r.Router, wrapRouter, w, req)
+ defer afterHandle()
+ if handled {
+ return
+ }
+ r.Router.ServeHTTP(tw, treq)
+}
+
+type wRouter struct {
+ *httprouter.Router
+}
+
+func wrapRouter(r *httprouter.Router) tracing.Router {
+ return &wRouter{r}
+}
+
+func (w wRouter) Lookup(method string, path string) (any, []tracing.Param, bool) {
+ h, params, ok := w.Router.Lookup(method, path)
+ return h, wrapParams(params), ok
+}
+
+type wParam struct {
+ httprouter.Param
+}
+
+func wrapParams(params httprouter.Params) []tracing.Param {
+ wParams := make([]tracing.Param, len(params))
+ for i, p := range params {
+ wParams[i] = wParam{p}
}
- resource := req.Method + " " + route
- spanOpts := options.Copy(r.config.spanOpts...) // spanOpts must be a copy of r.config.spanOpts, locally scoped, to avoid races.
- spanOpts = append(spanOpts, httptraceinternal.HeaderTagsFromRequest(req, r.config.headerTags))
+ return wParams
+}
+
+func (w wParam) GetKey() string {
+ return w.Key
+}
- httptrace.TraceAndServe(r.Router, w, req, &httptrace.ServeConfig{
- Service: r.config.serviceName,
- Resource: resource,
- SpanOpts: spanOpts,
- Route: route,
- })
+func (w wParam) GetValue() string {
+ return w.Value
}
diff --git a/contrib/julienschmidt/httprouter/internal/tracing/config.go b/contrib/julienschmidt/httprouter/internal/tracing/config.go
new file mode 100644
index 0000000000..07ee86a765
--- /dev/null
+++ b/contrib/julienschmidt/httprouter/internal/tracing/config.go
@@ -0,0 +1,98 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer"
+)
+
+const defaultServiceName = "http.router"
+
+type Config struct {
+ headerTags *internal.LockMap
+ spanOpts []ddtrace.StartSpanOption
+ serviceName string
+ analyticsRate float64
+}
+
+func NewConfig(opts ...Option) *Config {
+ cfg := new(Config)
+ if internal.BoolEnv("DD_TRACE_HTTPROUTER_ANALYTICS_ENABLED", false) {
+ cfg.analyticsRate = 1.0
+ } else {
+ cfg.analyticsRate = globalconfig.AnalyticsRate()
+ }
+ cfg.serviceName = namingschema.ServiceName(defaultServiceName)
+ cfg.headerTags = globalconfig.HeaderTagMap()
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if !math.IsNaN(cfg.analyticsRate) {
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.EventSampleRate, cfg.analyticsRate))
+ }
+
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.SpanKind, ext.SpanKindServer))
+ cfg.spanOpts = append(cfg.spanOpts, tracer.Tag(ext.Component, componentName))
+ return cfg
+}
+
+type Option func(*Config)
+
+// WithServiceName sets the given service name for the returned router.
+func WithServiceName(name string) Option {
+ return func(cfg *Config) {
+ cfg.serviceName = name
+ }
+}
+
+// WithSpanOptions applies the given set of options to the span started by the router.
+func WithSpanOptions(opts ...ddtrace.StartSpanOption) Option {
+ return func(cfg *Config) {
+ cfg.spanOpts = opts
+ }
+}
+
+// WithAnalytics enables Trace Analytics for all started spans.
+func WithAnalytics(on bool) Option {
+ return func(cfg *Config) {
+ if on {
+ cfg.analyticsRate = 1.0
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithAnalyticsRate sets the sampling rate for Trace Analytics events
+// correlated to started spans.
+func WithAnalyticsRate(rate float64) Option {
+ return func(cfg *Config) {
+ if rate >= 0.0 && rate <= 1.0 {
+ cfg.analyticsRate = rate
+ } else {
+ cfg.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
+// Warning:
+// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
+// Special headers can not be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies.
+func WithHeaderTags(headers []string) Option {
+ headerTagsMap := normalizer.HeaderTagSlice(headers)
+ return func(cfg *Config) {
+ cfg.headerTags = internal.NewLockMap(headerTagsMap)
+ }
+}
diff --git a/contrib/julienschmidt/httprouter/internal/tracing/tracing.go b/contrib/julienschmidt/httprouter/internal/tracing/tracing.go
new file mode 100644
index 0000000000..26d46b1e79
--- /dev/null
+++ b/contrib/julienschmidt/httprouter/internal/tracing/tracing.go
@@ -0,0 +1,61 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "net/http"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/options"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
+)
+
+const componentName = "julienschmidt/httprouter"
+
+func init() {
+ telemetry.LoadIntegration(componentName)
+ tracer.MarkIntegrationImported("github.com/julienschmidt/httprouter")
+}
+
+type Router interface {
+ Lookup(method string, path string) (any, []Param, bool)
+}
+
+type Param interface {
+ GetKey() string
+ GetValue() string
+}
+
+// BeforeHandle is an adapter of httptrace.BeforeHandle for julienschmidt/httprouter types.
+func BeforeHandle[T any, WT Router](
+ cfg *Config,
+ router T,
+ wrapRouter func(T) WT,
+ w http.ResponseWriter,
+ req *http.Request,
+) (http.ResponseWriter, *http.Request, func(), bool) {
+ wRouter := wrapRouter(router)
+ // get the resource associated to this request
+ route := req.URL.Path
+ _, ps, _ := wRouter.Lookup(req.Method, route)
+ for _, param := range ps {
+ route = strings.Replace(route, param.GetValue(), ":"+param.GetKey(), 1)
+ }
+
+ resource := req.Method + " " + route
+ spanOpts := options.Copy(cfg.spanOpts...) // spanOpts must be a copy of r.config.spanOpts, locally scoped, to avoid races.
+ spanOpts = append(spanOpts, httptrace.HeaderTagsFromRequest(req, cfg.headerTags))
+
+ serveCfg := &httptrace.ServeConfig{
+ Service: cfg.serviceName,
+ Resource: resource,
+ SpanOpts: spanOpts,
+ Route: route,
+ }
+ return httptrace.BeforeHandle(serveCfg, w, req)
+}
diff --git a/contrib/julienschmidt/httprouter/option.go b/contrib/julienschmidt/httprouter/option.go
index e8dbf3720b..295a7705c2 100644
--- a/contrib/julienschmidt/httprouter/option.go
+++ b/contrib/julienschmidt/httprouter/option.go
@@ -6,13 +6,9 @@
package httprouter
import (
- "math"
-
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/internal"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer"
)
const defaultServiceName = "http.router"
@@ -25,62 +21,23 @@ type routerConfig struct {
}
// RouterOption represents an option that can be passed to New.
-type RouterOption func(*routerConfig)
-
-func defaults(cfg *routerConfig) {
- if internal.BoolEnv("DD_TRACE_HTTPROUTER_ANALYTICS_ENABLED", false) {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = globalconfig.AnalyticsRate()
- }
- cfg.serviceName = namingschema.ServiceName(defaultServiceName)
- cfg.headerTags = globalconfig.HeaderTagMap()
-}
+type RouterOption = tracing.Option
// WithServiceName sets the given service name for the returned router.
-func WithServiceName(name string) RouterOption {
- return func(cfg *routerConfig) {
- cfg.serviceName = name
- }
-}
+var WithServiceName = tracing.WithServiceName
// WithSpanOptions applies the given set of options to the span started by the router.
-func WithSpanOptions(opts ...ddtrace.StartSpanOption) RouterOption {
- return func(cfg *routerConfig) {
- cfg.spanOpts = opts
- }
-}
+var WithSpanOptions = tracing.WithSpanOptions
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) RouterOption {
- return func(cfg *routerConfig) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) RouterOption {
- return func(cfg *routerConfig) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithHeaderTags enables the integration to attach HTTP request headers as span tags.
// Warning:
// Using this feature can risk exposing sensitive data such as authorization tokens to Datadog.
// Special headers can not be sub-selected. E.g., an entire Cookie header would be transmitted, without the ability to choose specific Cookies.
-func WithHeaderTags(headers []string) RouterOption {
- headerTagsMap := normalizer.HeaderTagSlice(headers)
- return func(cfg *routerConfig) {
- cfg.headerTags = internal.NewLockMap(headerTagsMap)
- }
-}
+var WithHeaderTags = tracing.WithHeaderTags
diff --git a/contrib/labstack/echo.v4/appsec_test.go b/contrib/labstack/echo.v4/appsec_test.go
index 5a28bc9a07..52d9a184b9 100644
--- a/contrib/labstack/echo.v4/appsec_test.go
+++ b/contrib/labstack/echo.v4/appsec_test.go
@@ -127,8 +127,8 @@ func TestAppSec(t *testing.T) {
// The span should contain the security event
finished := mt.FinishedSpans()
require.Len(t, finished, 1)
- event := finished[0].Tag("_dd.appsec.json").(string)
- require.NotNil(t, event)
+ event, ok := finished[0].Tag("_dd.appsec.json").(string)
+ require.True(t, ok, "expected string, found %T", finished[0].Tag("_dd.appsec.json"))
require.True(t, strings.Contains(event, "crs-913-120"))
// Wildcards are not named in echo
require.False(t, strings.Contains(event, "myPathParam3"))
@@ -140,7 +140,7 @@ func TestAppSec(t *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
- req, err := http.NewRequest("POST", srv.URL+"/etc/", nil)
+ req, err := http.NewRequest("POST", srv.URL+"/etc/passwd", nil)
if err != nil {
panic(err)
}
@@ -151,8 +151,8 @@ func TestAppSec(t *testing.T) {
finished := mt.FinishedSpans()
require.Len(t, finished, 1)
- event := finished[0].Tag("_dd.appsec.json").(string)
- require.NotNil(t, event)
+ event, ok := finished[0].Tag("_dd.appsec.json").(string)
+ require.True(t, ok, "expected string, found %T", finished[0].Tag("_dd.appsec.json"))
require.True(t, strings.Contains(event, "server.response.status"))
require.True(t, strings.Contains(event, "nfd-000-001"))
})
diff --git a/contrib/log/slog/slog.go b/contrib/log/slog/slog.go
index 1a27186a27..a29866f294 100644
--- a/contrib/log/slog/slog.go
+++ b/contrib/log/slog/slog.go
@@ -10,6 +10,7 @@ import (
"context"
"io"
"log/slog"
+ "strconv"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
@@ -23,6 +24,13 @@ func init() {
tracer.MarkIntegrationImported("log/slog")
}
+var _ slog.Handler = (*handler)(nil)
+
+type group struct {
+ name string
+ attrs []slog.Attr
+}
+
// NewJSONHandler is a convenience function that returns a *slog.JSONHandler logger enhanced with
// tracing information.
func NewJSONHandler(w io.Writer, opts *slog.HandlerOptions) slog.Handler {
@@ -31,21 +39,71 @@ func NewJSONHandler(w io.Writer, opts *slog.HandlerOptions) slog.Handler {
// WrapHandler enhances the given logger handler attaching tracing information to logs.
func WrapHandler(h slog.Handler) slog.Handler {
- return &handler{h}
+ return &handler{wrapped: h}
}
type handler struct {
- slog.Handler
+ wrapped slog.Handler
+ groups []group
+}
+
+// Enabled calls the wrapped handler Enabled method.
+func (h *handler) Enabled(ctx context.Context, level slog.Level) bool {
+ return h.wrapped.Enabled(ctx, level)
}
// Handle handles the given Record, attaching tracing information if found.
func (h *handler) Handle(ctx context.Context, rec slog.Record) error {
+ reqHandler := h.wrapped
+
+ // We need to ensure the trace id and span id keys are set at the root level:
+ // https://docs.datadoghq.com/tracing/other_telemetry/connect_logs_and_traces/
+ // In case the user has created group loggers, we ignore those and
+ // set them at the root level.
span, ok := tracer.SpanFromContext(ctx)
if ok {
- rec.Add(
- slog.Uint64(ext.LogKeyTraceID, span.Context().TraceID()),
- slog.Uint64(ext.LogKeySpanID, span.Context().SpanID()),
- )
+ traceID := strconv.FormatUint(span.Context().TraceID(), 10)
+ spanID := strconv.FormatUint(span.Context().SpanID(), 10)
+
+ attrs := []slog.Attr{
+ slog.String(ext.LogKeyTraceID, traceID),
+ slog.String(ext.LogKeySpanID, spanID),
+ }
+ reqHandler = reqHandler.WithAttrs(attrs)
+ }
+ for _, g := range h.groups {
+ reqHandler = reqHandler.WithGroup(g.name)
+ if len(g.attrs) > 0 {
+ reqHandler = reqHandler.WithAttrs(g.attrs)
+ }
+ }
+ return reqHandler.Handle(ctx, rec)
+}
+
+// WithAttrs saves the provided attributes associated to the current Group.
+// If Group was not called for the logger, we just call WithAttrs for the wrapped handler.
+func (h *handler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if len(h.groups) == 0 {
+ return &handler{
+ wrapped: h.wrapped.WithAttrs(attrs),
+ groups: h.groups,
+ }
+ }
+ groups := append([]group{}, h.groups...)
+ curGroup := groups[len(groups)-1]
+ curGroup.attrs = append(curGroup.attrs, attrs...)
+ groups[len(groups)-1] = curGroup
+
+ return &handler{
+ wrapped: h.wrapped,
+ groups: groups,
+ }
+}
+
+// WithGroup saves the provided group to be used later in the Handle method.
+func (h *handler) WithGroup(name string) slog.Handler {
+ return &handler{
+ wrapped: h.wrapped,
+ groups: append(h.groups, group{name: name}),
}
- return h.Handler.Handle(ctx, rec)
}
diff --git a/contrib/log/slog/slog_test.go b/contrib/log/slog/slog_test.go
index 5b74691469..0d25702cb2 100644
--- a/contrib/log/slog/slog_test.go
+++ b/contrib/log/slog/slog_test.go
@@ -9,7 +9,9 @@ import (
"bytes"
"context"
"encoding/json"
+ "io"
"log/slog"
+ "strconv"
"strings"
"testing"
@@ -21,29 +23,37 @@ import (
internallog "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
-func assertLogEntry(t *testing.T, rawEntry, wantMsg, wantLevel string) {
+func assertLogEntry(t *testing.T, rawEntry, wantMsg, wantLevel string, span tracer.Span, assertExtra func(t *testing.T, entry map[string]interface{})) {
t.Helper()
- var data map[string]interface{}
- err := json.Unmarshal([]byte(rawEntry), &data)
+ t.Log(rawEntry)
+
+ var entry map[string]interface{}
+ err := json.Unmarshal([]byte(rawEntry), &entry)
require.NoError(t, err)
- require.NotEmpty(t, data)
+ require.NotEmpty(t, entry)
+
+ assert.Equal(t, wantMsg, entry["msg"])
+ assert.Equal(t, wantLevel, entry["level"])
+ assert.NotEmpty(t, entry["time"])
- assert.Equal(t, wantMsg, data["msg"])
- assert.Equal(t, wantLevel, data["level"])
- assert.NotEmpty(t, data["time"])
- assert.NotEmpty(t, data[ext.LogKeyTraceID])
- assert.NotEmpty(t, data[ext.LogKeySpanID])
+ traceID := strconv.FormatUint(span.Context().TraceID(), 10)
+ spanID := strconv.FormatUint(span.Context().SpanID(), 10)
+ assert.Equal(t, traceID, entry[ext.LogKeyTraceID], "trace id not found")
+ assert.Equal(t, spanID, entry[ext.LogKeySpanID], "span id not found")
+
+ if assertExtra != nil {
+ assertExtra(t, entry)
+ }
}
-func testLogger(t *testing.T, createHandler func(b *bytes.Buffer) slog.Handler) {
+func testLogger(t *testing.T, createLogger func(b io.Writer) *slog.Logger, assertExtra func(t *testing.T, entry map[string]interface{})) {
tracer.Start(tracer.WithLogger(internallog.DiscardLogger{}))
defer tracer.Stop()
// create the application logger
var b bytes.Buffer
- h := createHandler(&b)
- logger := slog.New(h)
+ logger := createLogger(&b)
// start a new span
span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
@@ -59,18 +69,146 @@ func testLogger(t *testing.T, createHandler func(b *bytes.Buffer) slog.Handler)
)
// assert log entries contain trace information
require.Len(t, logs, 2)
- assertLogEntry(t, logs[0], "this is an info log with tracing information", "INFO")
- assertLogEntry(t, logs[1], "this is an error log with tracing information", "ERROR")
+ assertLogEntry(t, logs[0], "this is an info log with tracing information", "INFO", span, assertExtra)
+ assertLogEntry(t, logs[1], "this is an error log with tracing information", "ERROR", span, assertExtra)
}
func TestNewJSONHandler(t *testing.T) {
- testLogger(t, func(b *bytes.Buffer) slog.Handler {
- return NewJSONHandler(b, nil)
- })
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil))
+ },
+ nil,
+ )
}
func TestWrapHandler(t *testing.T) {
- testLogger(t, func(b *bytes.Buffer) slog.Handler {
- return WrapHandler(slog.NewJSONHandler(b, nil))
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(WrapHandler(slog.NewJSONHandler(w, nil)))
+ },
+ nil,
+ )
+}
+
+func TestHandlerWithAttrs(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ With("key1", "val1").
+ With(ext.LogKeyTraceID, "trace-id").
+ With(ext.LogKeySpanID, "span-id")
+ },
+ nil,
+ )
+}
+
+func TestHandlerWithGroup(t *testing.T) {
+ t.Run("simple", func(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ WithGroup("some-group").
+ With("key1", "val1")
+ },
+ func(t *testing.T, entry map[string]interface{}) {
+ assert.Equal(t, map[string]interface{}{
+ "key1": "val1",
+ }, entry["some-group"], "group entry not found")
+ },
+ )
+ })
+
+ t.Run("nested groups", func(t *testing.T) {
+ testLogger(
+ t,
+ func(w io.Writer) *slog.Logger {
+ return slog.New(NewJSONHandler(w, nil)).
+ With("key0", "val0").
+ WithGroup("group1").
+ With("key1", "val1").
+ WithGroup("group1"). // repeat same key again
+ With("key1", "val1").
+ WithGroup("group2").
+ With("key2", "val2").
+ With("key3", "val3")
+ },
+ func(t *testing.T, entry map[string]interface{}) {
+ groupKeys := map[string]interface{}{
+ "key1": "val1",
+ "group1": map[string]interface{}{
+ "key1": "val1",
+ "group2": map[string]interface{}{
+ "key2": "val2",
+ "key3": "val3",
+ },
+ },
+ }
+ assert.Equal(t, "val0", entry["key0"], "root level key not found")
+ assert.Equal(t, groupKeys, entry["group1"], "nested group entries not found")
+ },
+ )
})
}
+
+// TestRecordClone is a regression test for https://github.com/DataDog/dd-trace-go/issues/2918.
+func TestRecordClone(t *testing.T) {
+ // start a new span
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ defer span.Finish()
+
+ r := slog.Record{}
+ gate := func() {
+ // Calling Handle below should not overwrite this value
+ r.Add("sentinel-key", "sentinel-value")
+ }
+ h := handlerGate{gate, WrapHandler(slog.NewJSONHandler(io.Discard, nil))}
+ // Up to slog.nAttrsInline (5) attributes are stored in the front array of
+ // the record. Make sure to add more records than that to trigger the bug.
+ for i := 0; i < 5*10; i++ {
+ r.Add("i", i)
+ }
+ h.Handle(ctx, r)
+
+ var foundSentinel bool
+ r.Attrs(func(a slog.Attr) bool {
+ if a.Key == "sentinel-key" {
+ foundSentinel = true
+ return false
+ }
+ return true
+ })
+ assert.True(t, foundSentinel)
+}
+
+func BenchmarkHandler(b *testing.B) {
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ defer span.Finish()
+
+ // create a logger with a bunch of nested groups and fields
+ logger := slog.New(NewJSONHandler(io.Discard, nil))
+ logger = logger.With("attr1", "val1").WithGroup("group1").With("attr2", "val2").WithGroup("group3").With("attr3", "val3")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ logger.InfoContext(ctx, "some message")
+ }
+}
+
+// handlerGate calls a gate function before calling the underlying handler. This
+// allows simulating a concurrent modification of the record that happens after
+// Handle is called (and the record has been copied), but before the back array
+// of the Record is written to.
+type handlerGate struct {
+ gate func()
+ slog.Handler
+}
+
+func (h handlerGate) Handle(ctx context.Context, r slog.Record) {
+ h.gate()
+ h.Handler.Handle(ctx, r)
+}
diff --git a/contrib/net/http/http_test.go b/contrib/net/http/http_test.go
index 47e0370e13..c734bbd297 100644
--- a/contrib/net/http/http_test.go
+++ b/contrib/net/http/http_test.go
@@ -313,28 +313,14 @@ func TestServeMuxGo122Patterns(t *testing.T) {
// Check the /foo span
fooSpan := spans[1]
- if fooW.Code == http.StatusOK {
- assert.Equal("/foo", fooSpan.Tag(ext.HTTPRoute))
- assert.Equal("GET /foo", fooSpan.Tag(ext.ResourceName))
- } else {
- // Until our go.mod version is go1.22 or greater, the mux will not
- // understand the "GET /foo" pattern, causing the request to be handled
- // by the 404 handler. Let's assert what we can, and mark the test as
- // skipped to highlight the issue.
- assert.Equal(http.StatusNotFound, fooW.Code)
- assert.Equal(nil, fooSpan.Tag(ext.HTTPRoute))
- // Using "GET " as a resource name doesn't seem ideal, but that's how
- // the mux instrumentation deals with 404s right now.
- assert.Equal("GET ", fooSpan.Tag(ext.ResourceName))
- t.Skip("run `go mod edit -go=1.22` to run the full test")
- }
-
+ assert.Equal(http.StatusOK, fooW.Code)
+ assert.Equal("/foo", fooSpan.Tag(ext.HTTPRoute))
+ assert.Equal("GET /foo", fooSpan.Tag(ext.ResourceName))
}
func TestWrapHandlerWithResourceNameNoRace(_ *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
- r := httptest.NewRequest("GET", "/", nil)
resourceNamer := func(_ *http.Request) string {
return "custom-resource-name"
}
@@ -346,8 +332,9 @@ func TestWrapHandlerWithResourceNameNoRace(_ *testing.T) {
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
- w := httptest.NewRecorder()
defer wg.Done()
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "/", nil)
mux.ServeHTTP(w, r)
}()
}
@@ -533,10 +520,14 @@ func handler200(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("OK\n"))
}
-func handler500(w http.ResponseWriter, _ *http.Request) {
+func handler500(w http.ResponseWriter, r *http.Request) {
http.Error(w, "500!", http.StatusInternalServerError)
}
+func handler400(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "400!", http.StatusBadRequest)
+}
+
func BenchmarkHttpServeTrace(b *testing.B) {
tracer.Start(tracer.WithLogger(log.DiscardLogger{}))
defer tracer.Stop()
diff --git a/contrib/net/http/option.go b/contrib/net/http/option.go
index cb658b4551..22e63ee3b0 100644
--- a/contrib/net/http/option.go
+++ b/contrib/net/http/option.go
@@ -8,7 +8,9 @@ package http
import (
"math"
"net/http"
+ "os"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
@@ -18,7 +20,13 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/internal/normalizer"
)
-const defaultServiceName = "http.router"
+const (
+ defaultServiceName = "http.router"
+ // envClientQueryStringEnabled is the name of the env var used to specify whether query string collection is enabled for http client spans.
+ envClientQueryStringEnabled = "DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING"
+ // envClientErrorStatuses is the name of the env var that specifies error status codes on http client spans
+ envClientErrorStatuses = "DD_TRACE_HTTP_CLIENT_ERROR_STATUSES"
+)
type config struct {
serviceName string
@@ -146,6 +154,8 @@ type roundTripperConfig struct {
spanOpts []ddtrace.StartSpanOption
propagation bool
errCheck func(err error) bool
+ queryString bool // reports whether the query string is included in the URL tag for http client spans
+ isStatusError func(statusCode int) bool
}
func newRoundTripperConfig() *roundTripperConfig {
@@ -156,14 +166,22 @@ func newRoundTripperConfig() *roundTripperConfig {
defaultSpanNamer := func(_ *http.Request) string {
return spanName
}
- return &roundTripperConfig{
+
+ c := &roundTripperConfig{
serviceName: namingschema.ServiceNameOverrideV0("", ""),
analyticsRate: globalconfig.AnalyticsRate(),
resourceNamer: defaultResourceNamer,
propagation: true,
spanNamer: defaultSpanNamer,
ignoreRequest: func(_ *http.Request) bool { return false },
+ queryString: internal.BoolEnv(envClientQueryStringEnabled, true),
+ isStatusError: isClientError,
+ }
+ v := os.Getenv(envClientErrorStatuses)
+ if fn := httptrace.GetErrorCodesFromInput(v); fn != nil {
+ c.isStatusError = fn
}
+ return c
}
// A RoundTripperOption represents an option that can be passed to
@@ -264,3 +282,7 @@ func RTWithErrorCheck(fn func(err error) bool) RoundTripperOption {
cfg.errCheck = fn
}
}
+
+func isClientError(statusCode int) bool {
+ return statusCode >= 400 && statusCode < 500
+}
diff --git a/contrib/net/http/roundtripper.go b/contrib/net/http/roundtripper.go
index bd71daa801..7e47c530b6 100644
--- a/contrib/net/http/roundtripper.go
+++ b/contrib/net/http/roundtripper.go
@@ -7,11 +7,13 @@ package http
import (
"fmt"
- "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"math"
"net/http"
"os"
"strconv"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
@@ -38,7 +40,7 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er
tracer.SpanType(ext.SpanTypeHTTP),
tracer.ResourceName(resourceName),
tracer.Tag(ext.HTTPMethod, req.Method),
- tracer.Tag(ext.HTTPURL, url.String()),
+ tracer.Tag(ext.HTTPURL, urlFromRequest(req, rt.cfg.queryString)),
tracer.Tag(ext.Component, componentName),
tracer.Tag(ext.SpanKind, ext.SpanKindClient),
tracer.Tag(ext.NetworkDestinationName, url.Hostname()),
@@ -86,7 +88,6 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er
}
res, err = rt.base.RoundTrip(r2)
-
if err != nil {
span.SetTag("http.errors", err.Error())
if rt.cfg.errCheck == nil || rt.cfg.errCheck(err) {
@@ -94,8 +95,7 @@ func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err er
}
} else {
span.SetTag(ext.HTTPCode, strconv.Itoa(res.StatusCode))
- // treat 5XX as errors
- if res.StatusCode/100 == 5 {
+ if rt.cfg.isStatusError(res.StatusCode) {
span.SetTag("http.errors", res.Status)
span.SetTag(ext.Error, fmt.Errorf("%d: %s", res.StatusCode, http.StatusText(res.StatusCode)))
}
@@ -135,3 +135,32 @@ func WrapClient(c *http.Client, opts ...RoundTripperOption) *http.Client {
c.Transport = WrapRoundTripper(c.Transport, opts...)
return c
}
+
+// urlFromRequest returns the URL from the HTTP request. The URL query string is included in the return object iff queryString is true
+// See https://docs.datadoghq.com/tracing/configure_data_security#redacting-the-query-in-the-url for more information.
+func urlFromRequest(r *http.Request, queryString bool) string {
+ // Quoting net/http comments about net.Request.URL on server requests:
+ // "For most requests, fields other than Path and RawQuery will be
+ // empty. (See RFC 7230, Section 5.3)"
+ // This is why we don't rely on url.URL.String(), url.URL.Host, url.URL.Scheme, etc...
+ var url string
+ path := r.URL.EscapedPath()
+ scheme := r.URL.Scheme
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ if r.Host != "" {
+ url = strings.Join([]string{scheme, "://", r.Host, path}, "")
+ } else {
+ url = path
+ }
+ // Collect the query string if we are allowed to report it and obfuscate it if possible/allowed
+ if queryString && r.URL.RawQuery != "" {
+ query := r.URL.RawQuery
+ url = strings.Join([]string{url, query}, "?")
+ }
+ if frag := r.URL.EscapedFragment(); frag != "" {
+ url = strings.Join([]string{url, frag}, "#")
+ }
+ return url
+}
diff --git a/contrib/net/http/roundtripper_test.go b/contrib/net/http/roundtripper_test.go
index bc9f590fb3..4002647fe6 100644
--- a/contrib/net/http/roundtripper_test.go
+++ b/contrib/net/http/roundtripper_test.go
@@ -11,6 +11,8 @@ import (
"net/http"
"net/http/httptest"
"net/url"
+ "os"
+ "regexp"
"strconv"
"strings"
"testing"
@@ -23,7 +25,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
"github.com/stretchr/testify/assert"
@@ -100,58 +102,67 @@ func TestRoundTripper(t *testing.T) {
assert.Equal(t, wantPort, s1.Tag(ext.NetworkDestinationPort))
}
-func TestRoundTripperServerError(t *testing.T) {
- mt := mocktracer.Start()
- defer mt.Stop()
-
- s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
- assert.NoError(t, err)
-
- span := tracer.StartSpan("test",
- tracer.ChildOf(spanctx))
- defer span.Finish()
-
- w.WriteHeader(http.StatusInternalServerError)
- w.Write([]byte("Error"))
- }))
- defer s.Close()
-
- rt := WrapRoundTripper(http.DefaultTransport,
- WithBefore(func(req *http.Request, span ddtrace.Span) {
- span.SetTag("CalledBefore", true)
- }),
- WithAfter(func(res *http.Response, span ddtrace.Span) {
- span.SetTag("CalledAfter", true)
- }))
-
+func makeRequests(rt http.RoundTripper, url string, t *testing.T) {
client := &http.Client{
Transport: rt,
}
+ resp, err := client.Get(url + "/400")
+ assert.Nil(t, err)
+ defer resp.Body.Close()
- resp, err := client.Get(s.URL + "/hello/world")
+ resp, err = client.Get(url + "/500")
assert.Nil(t, err)
defer resp.Body.Close()
- spans := mt.FinishedSpans()
- assert.Len(t, spans, 2)
- assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+ resp, err = client.Get(url + "/200")
+ assert.Nil(t, err)
+ defer resp.Body.Close()
+}
- s0 := spans[0]
- assert.Equal(t, "test", s0.OperationName())
- assert.Equal(t, "test", s0.Tag(ext.ResourceName))
+func TestRoundTripperErrors(t *testing.T) {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/200", handler200)
+ mux.HandleFunc("/400", handler400)
+ mux.HandleFunc("/500", handler500)
+ s := httptest.NewServer(mux)
+ defer s.Close()
- s1 := spans[1]
- assert.Equal(t, "http.request", s1.OperationName())
- assert.Equal(t, "http.request", s1.Tag(ext.ResourceName))
- assert.Equal(t, "500", s1.Tag(ext.HTTPCode))
- assert.Equal(t, "GET", s1.Tag(ext.HTTPMethod))
- assert.Equal(t, s.URL+"/hello/world", s1.Tag(ext.HTTPURL))
- assert.Equal(t, fmt.Errorf("500: Internal Server Error"), s1.Tag(ext.Error))
- assert.Equal(t, true, s1.Tag("CalledBefore"))
- assert.Equal(t, true, s1.Tag("CalledAfter"))
- assert.Equal(t, ext.SpanKindClient, s1.Tag(ext.SpanKind))
- assert.Equal(t, "net/http", s1.Tag(ext.Component))
+ t.Run("default", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ rt := WrapRoundTripper(http.DefaultTransport)
+ makeRequests(rt, s.URL, t)
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 3)
+ s := spans[0] // 400 is error
+ assert.Equal(t, "400: Bad Request", s.Tag(ext.Error).(error).Error())
+ assert.Equal(t, "400", s.Tag(ext.HTTPCode))
+ s = spans[1] // 500 is not error
+ assert.Empty(t, s.Tag(ext.Error))
+ assert.Equal(t, "500", s.Tag(ext.HTTPCode))
+ s = spans[2] // 200 is not error
+ assert.Empty(t, s.Tag(ext.Error))
+ assert.Equal(t, "200", s.Tag(ext.HTTPCode))
+ })
+ t.Run("custom", func(t *testing.T) {
+ os.Setenv("DD_TRACE_HTTP_CLIENT_ERROR_STATUSES", "500-510")
+ defer os.Unsetenv("DD_TRACE_HTTP_CLIENT_ERROR_STATUSES")
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ rt := WrapRoundTripper(http.DefaultTransport)
+ makeRequests(rt, s.URL, t)
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 3)
+ s := spans[0] // 400 is not error
+ assert.Empty(t, s.Tag(ext.Error))
+ assert.Equal(t, "400", s.Tag(ext.HTTPCode))
+ s = spans[1] // 500 is error
+ assert.Equal(t, "500: Internal Server Error", s.Tag(ext.Error).(error).Error())
+ assert.Equal(t, "500", s.Tag(ext.HTTPCode))
+ s = spans[2] // 200 is not error
+ assert.Empty(t, s.Tag(ext.Error))
+ assert.Equal(t, "200", s.Tag(ext.HTTPCode))
+ })
}
func TestRoundTripperNetworkError(t *testing.T) {
@@ -555,6 +566,70 @@ func TestSpanOptions(t *testing.T) {
assert.Equal(t, tagValue, spans[0].Tag(tagKey))
}
+func TestClientQueryString(t *testing.T) {
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello World"))
+ }))
+ defer s.Close()
+ t.Run("default", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rt := WrapRoundTripper(http.DefaultTransport)
+ client := &http.Client{
+ Transport: rt,
+ }
+ resp, err := client.Get(s.URL + "/hello/world?querystring=xyz")
+ assert.Nil(t, err)
+ defer resp.Body.Close()
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+
+ assert.Regexp(t, regexp.MustCompile(`^http://.*?/hello/world\?querystring=xyz$`), spans[0].Tag(ext.HTTPURL))
+ })
+ t.Run("false", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ os.Setenv("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING", "false")
+ defer os.Unsetenv("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING")
+
+ rt := WrapRoundTripper(http.DefaultTransport)
+ client := &http.Client{
+ Transport: rt,
+ }
+ resp, err := client.Get(s.URL + "/hello/world?querystring=xyz")
+ assert.Nil(t, err)
+ defer resp.Body.Close()
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+
+ assert.Regexp(t, regexp.MustCompile(`^http://.*?/hello/world$`), spans[0].Tag(ext.HTTPURL))
+ })
+ // DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED applies only to server spans, not client
+ t.Run("Not impacted by DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ os.Setenv("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING", "true")
+ os.Setenv("DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED", "true")
+ defer os.Unsetenv("DD_TRACE_HTTP_CLIENT_TAG_QUERY_STRING")
+ defer os.Unsetenv("DD_TRACE_HTTP_URL_QUERY_STRING_DISABLED")
+
+ rt := WrapRoundTripper(http.DefaultTransport)
+ client := &http.Client{
+ Transport: rt,
+ }
+ resp, err := client.Get(s.URL + "/hello/world?querystring=xyz")
+ assert.Nil(t, err)
+ defer resp.Body.Close()
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+
+ assert.Contains(t, spans[0].Tag(ext.HTTPURL), "/hello/world?querystring=xyz")
+ })
+}
+
func TestRoundTripperPropagation(t *testing.T) {
mt := mocktracer.Start()
defer mt.Stop()
@@ -687,7 +762,7 @@ func TestAppsec(t *testing.T) {
require.Contains(t, serviceSpan.Tags(), "_dd.appsec.json")
appsecJSON := serviceSpan.Tag("_dd.appsec.json")
- require.Contains(t, appsecJSON, httpsec.ServerIoNetURLAddr)
+ require.Contains(t, appsecJSON, addresses.ServerIoNetURLAddr)
require.Contains(t, serviceSpan.Tags(), "_dd.stack")
require.NotContains(t, serviceSpan.Tags(), "error.message")
diff --git a/contrib/net/http/trace.go b/contrib/net/http/trace.go
index b04867c931..f78aa19a9a 100644
--- a/contrib/net/http/trace.go
+++ b/contrib/net/http/trace.go
@@ -5,18 +5,11 @@
package http // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
-//go:generate sh -c "go run make_responsewriter.go | gofmt > trace_gen.go"
-
import (
"net/http"
"gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httptrace"
- "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/options"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
)
@@ -28,91 +21,16 @@ func init() {
}
// ServeConfig specifies the tracing configuration when using TraceAndServe.
-type ServeConfig struct {
- // Service specifies the service name to use. If left blank, the global service name
- // will be inherited.
- Service string
- // Resource optionally specifies the resource name for this request.
- Resource string
- // QueryParams should be true in order to append the URL query values to the "http.url" tag.
- QueryParams bool
- // Route is the request matched route if any, or is empty otherwise
- Route string
- // RouteParams specifies framework-specific route parameters (e.g. for route /user/:id coming
- // in as /user/123 we'll have {"id": "123"}). This field is optional and is used for monitoring
- // by AppSec. It is only taken into account when AppSec is enabled.
- RouteParams map[string]string
- // FinishOpts specifies any options to be used when finishing the request span.
- FinishOpts []ddtrace.FinishOption
- // SpanOpts specifies any options to be applied to the request starting span.
- SpanOpts []ddtrace.StartSpanOption
-}
+type ServeConfig = httptrace.ServeConfig
// TraceAndServe serves the handler h using the given ResponseWriter and Request, applying tracing
// according to the specified config.
func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, cfg *ServeConfig) {
- if cfg == nil {
- cfg = new(ServeConfig)
- }
- opts := options.Copy(cfg.SpanOpts...) // make a copy of cfg.SpanOpts to avoid races
- if cfg.Service != "" {
- opts = append(opts, tracer.ServiceName(cfg.Service))
- }
- if cfg.Resource != "" {
- opts = append(opts, tracer.ResourceName(cfg.Resource))
- }
- if cfg.Route != "" {
- opts = append(opts, tracer.Tag(ext.HTTPRoute, cfg.Route))
- }
- span, ctx := httptrace.StartRequestSpan(r, opts...)
- rw, ddrw := wrapResponseWriter(w)
- defer func() {
- httptrace.FinishRequestSpan(span, ddrw.status, cfg.FinishOpts...)
- }()
-
- if appsec.Enabled() {
- h = httpsec.WrapHandler(h, span, cfg.RouteParams, nil)
- }
- h.ServeHTTP(rw, r.WithContext(ctx))
-}
-
-// responseWriter is a small wrapper around an http response writer that will
-// intercept and store the status of a request.
-type responseWriter struct {
- http.ResponseWriter
- status int
-}
-
-func newResponseWriter(w http.ResponseWriter) *responseWriter {
- return &responseWriter{w, 0}
-}
-
-// Status returns the status code that was monitored.
-func (w *responseWriter) Status() int {
- return w.status
-}
+ tw, tr, afterHandle, handled := httptrace.BeforeHandle(cfg, w, r)
+ defer afterHandle()
-// Write writes the data to the connection as part of an HTTP reply.
-// We explicitly call WriteHeader with the 200 status code
-// in order to get it reported into the span.
-func (w *responseWriter) Write(b []byte) (int, error) {
- if w.status == 0 {
- w.WriteHeader(http.StatusOK)
- }
- return w.ResponseWriter.Write(b)
-}
-
-// WriteHeader sends an HTTP response header with status code.
-// It also sets the status code to the span.
-func (w *responseWriter) WriteHeader(status int) {
- if w.status != 0 {
+ if handled {
return
}
- w.ResponseWriter.WriteHeader(status)
- w.status = status
-}
-
-// Unwrap returns the underlying wrapped http.ResponseWriter.
-func (w *responseWriter) Unwrap() http.ResponseWriter {
- return w.ResponseWriter
+ h.ServeHTTP(tw, tr)
}
diff --git a/contrib/net/http/trace_test.go b/contrib/net/http/trace_test.go
index fe8e062973..e00aaa95fe 100644
--- a/contrib/net/http/trace_test.go
+++ b/contrib/net/http/trace_test.go
@@ -146,26 +146,6 @@ func TestTraceAndServe(t *testing.T) {
assert.Equal("Hello, world!\n", string(slurp))
})
- // there doesn't appear to be an easy way to test http.Pusher support via an http request
- // so we'll just confirm wrapResponseWriter preserves it
- t.Run("Pusher", func(t *testing.T) {
- var i struct {
- http.ResponseWriter
- http.Pusher
- }
- var w http.ResponseWriter = i
- _, ok := w.(http.ResponseWriter)
- assert.True(t, ok)
- _, ok = w.(http.Pusher)
- assert.True(t, ok)
-
- w, _ = wrapResponseWriter(w)
- _, ok = w.(http.ResponseWriter)
- assert.True(t, ok)
- _, ok = w.(http.Pusher)
- assert.True(t, ok)
- })
-
t.Run("distributed", func(t *testing.T) {
mt := mocktracer.Start()
assert := assert.New(t)
diff --git a/contrib/segmentio/kafka.go.v0/example_test.go b/contrib/segmentio/kafka.go.v0/example_test.go
index 11dbd328a3..504b5689a4 100644
--- a/contrib/segmentio/kafka.go.v0/example_test.go
+++ b/contrib/segmentio/kafka.go.v0/example_test.go
@@ -13,7 +13,7 @@ import (
kafkatrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
- kafka "github.com/segmentio/kafka-go"
+ "github.com/segmentio/kafka-go"
)
func ExampleWriter() {
diff --git a/contrib/segmentio/kafka.go.v0/headers.go b/contrib/segmentio/kafka.go.v0/headers.go
index ce19449d5a..347806d36a 100644
--- a/contrib/segmentio/kafka.go.v0/headers.go
+++ b/contrib/segmentio/kafka.go.v0/headers.go
@@ -6,49 +6,14 @@
package kafka
import (
+ "github.com/segmentio/kafka-go"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
-
- "github.com/segmentio/kafka-go"
)
-// A messageCarrier implements TextMapReader/TextMapWriter for extracting/injecting traces on a kafka.Message
-type messageCarrier struct {
- msg *kafka.Message
-}
-
-var _ interface {
- tracer.TextMapReader
- tracer.TextMapWriter
-} = (*messageCarrier)(nil)
-
-// ForeachKey conforms to the TextMapReader interface.
-func (c messageCarrier) ForeachKey(handler func(key, val string) error) error {
- for _, h := range c.msg.Headers {
- err := handler(h.Key, string(h.Value))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// Set implements TextMapWriter
-func (c messageCarrier) Set(key, val string) {
- // ensure uniqueness of keys
- for i := 0; i < len(c.msg.Headers); i++ {
- if string(c.msg.Headers[i].Key) == key {
- c.msg.Headers = append(c.msg.Headers[:i], c.msg.Headers[i+1:]...)
- i--
- }
- }
- c.msg.Headers = append(c.msg.Headers, kafka.Header{
- Key: key,
- Value: []byte(val),
- })
-}
-
// ExtractSpanContext retrieves the SpanContext from a kafka.Message
func ExtractSpanContext(msg kafka.Message) (ddtrace.SpanContext, error) {
- return tracer.Extract(messageCarrier{&msg})
+ return tracer.Extract(tracing.NewMessageCarrier(wrapMessage(&msg)))
}
diff --git a/contrib/segmentio/kafka.go.v0/internal/tracing/dsm.go b/contrib/segmentio/kafka.go.v0/internal/tracing/dsm.go
new file mode 100644
index 0000000000..728308d0ad
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/dsm.go
@@ -0,0 +1,86 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "context"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/datastreams"
+ "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func (tr *Tracer) SetConsumeDSMCheckpoint(msg Message) {
+ if !tr.dataStreamsEnabled || msg == nil {
+ return
+ }
+ edges := []string{"direction:in", "topic:" + msg.GetTopic(), "type:kafka"}
+ if tr.kafkaCfg.ConsumerGroupID != "" {
+ edges = append(edges, "group:"+tr.kafkaCfg.ConsumerGroupID)
+ }
+ carrier := NewMessageCarrier(msg)
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getConsumerMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+ if tr.kafkaCfg.ConsumerGroupID != "" {
+ // only track Kafka lag if a consumer group is set.
+ // since there is no ack mechanism, we consider that messages read are committed right away.
+ tracer.TrackKafkaCommitOffset(tr.kafkaCfg.ConsumerGroupID, msg.GetTopic(), int32(msg.GetPartition()), msg.GetOffset())
+ }
+}
+
+func (tr *Tracer) SetProduceDSMCheckpoint(msg Message, writer Writer) {
+ if !tr.dataStreamsEnabled || msg == nil {
+ return
+ }
+
+ var topic string
+ if writer.GetTopic() != "" {
+ topic = writer.GetTopic()
+ } else {
+ topic = msg.GetTopic()
+ }
+
+ edges := []string{"direction:out", "topic:" + topic, "type:kafka"}
+ carrier := MessageCarrier{msg}
+ ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
+ datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
+ options.CheckpointParams{PayloadSize: getProducerMsgSize(msg)},
+ edges...,
+ )
+ if !ok {
+ return
+ }
+
+ // Headers will be dropped if the current protocol does not support them
+ datastreams.InjectToBase64Carrier(ctx, carrier)
+}
+
+func getProducerMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ if msg.GetValue() != nil {
+ size += int64(len(msg.GetValue()))
+ }
+ if msg.GetKey() != nil {
+ size += int64(len(msg.GetKey()))
+ }
+ return size
+}
+
+func getConsumerMsgSize(msg Message) (size int64) {
+ for _, header := range msg.GetHeaders() {
+ size += int64(len(header.GetKey()) + len(header.GetValue()))
+ }
+ return size + int64(len(msg.GetValue())+len(msg.GetKey()))
+}
diff --git a/contrib/segmentio/kafka.go.v0/internal/tracing/message_carrier.go b/contrib/segmentio/kafka.go.v0/internal/tracing/message_carrier.go
new file mode 100644
index 0000000000..f06c40fac1
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/message_carrier.go
@@ -0,0 +1,52 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// A MessageCarrier implements TextMapReader/TextMapWriter for extracting/injecting traces on a kafka.Message
+type MessageCarrier struct {
+ msg Message
+}
+
+var _ interface {
+ tracer.TextMapReader
+ tracer.TextMapWriter
+} = (*MessageCarrier)(nil)
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c MessageCarrier) ForeachKey(handler func(key, val string) error) error {
+ for _, h := range c.msg.GetHeaders() {
+ err := handler(h.GetKey(), string(h.GetValue()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements TextMapWriter
+func (c MessageCarrier) Set(key, val string) {
+ headers := c.msg.GetHeaders()
+ // ensure uniqueness of keys
+ for i := 0; i < len(headers); i++ {
+ if headers[i].GetKey() == key {
+ headers = append(headers[:i], headers[i+1:]...)
+ i--
+ }
+ }
+ headers = append(headers, KafkaHeader{
+ Key: key,
+ Value: []byte(val),
+ })
+ c.msg.SetHeaders(headers)
+}
+
+func NewMessageCarrier(msg Message) MessageCarrier {
+ return MessageCarrier{msg: msg}
+}
diff --git a/contrib/segmentio/kafka.go.v0/internal/tracing/tracer.go b/contrib/segmentio/kafka.go.v0/internal/tracing/tracer.go
new file mode 100644
index 0000000000..21baa893f3
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/tracer.go
@@ -0,0 +1,89 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package tracing
+
+import (
+ "math"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+)
+
+const defaultServiceName = "kafka"
+
+type Tracer struct {
+ consumerServiceName string
+ producerServiceName string
+ consumerSpanName string
+ producerSpanName string
+ analyticsRate float64
+ dataStreamsEnabled bool
+ kafkaCfg KafkaConfig
+}
+
+// An Option customizes the Tracer.
+type Option func(tr *Tracer)
+
+func NewTracer(kafkaCfg KafkaConfig, opts ...Option) *Tracer {
+ tr := &Tracer{
+ // analyticsRate: globalConfig.AnalyticsRate(),
+ analyticsRate: math.NaN(),
+ kafkaCfg: kafkaCfg,
+ }
+ if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
+ tr.analyticsRate = 1.0
+ }
+
+ tr.dataStreamsEnabled = internal.BoolEnv("DD_DATA_STREAMS_ENABLED", false)
+
+ tr.consumerServiceName = namingschema.ServiceName(defaultServiceName)
+ tr.producerServiceName = namingschema.ServiceNameOverrideV0(defaultServiceName, defaultServiceName)
+ tr.consumerSpanName = namingschema.OpName(namingschema.KafkaInbound)
+ tr.producerSpanName = namingschema.OpName(namingschema.KafkaOutbound)
+
+ for _, opt := range opts {
+ opt(tr)
+ }
+ return tr
+}
+
+// WithServiceName sets the Tracer service name to serviceName.
+func WithServiceName(serviceName string) Option {
+ return func(tr *Tracer) {
+ tr.consumerServiceName = serviceName
+ tr.producerServiceName = serviceName
+ }
+}
+
+// WithAnalytics enables Trace Analytics for all started spans.
+func WithAnalytics(on bool) Option {
+ return func(tr *Tracer) {
+ if on {
+ tr.analyticsRate = 1.0
+ } else {
+ tr.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithAnalyticsRate sets the sampling rate for Trace Analytics events
+// correlated to started spans.
+func WithAnalyticsRate(rate float64) Option {
+ return func(tr *Tracer) {
+ if rate >= 0.0 && rate <= 1.0 {
+ tr.analyticsRate = rate
+ } else {
+ tr.analyticsRate = math.NaN()
+ }
+ }
+}
+
+// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
+func WithDataStreams() Option {
+ return func(tr *Tracer) {
+ tr.dataStreamsEnabled = true
+ }
+}
diff --git a/contrib/segmentio/kafka.go.v0/option_test.go b/contrib/segmentio/kafka.go.v0/internal/tracing/tracer_test.go
similarity index 83%
rename from contrib/segmentio/kafka.go.v0/option_test.go
rename to contrib/segmentio/kafka.go.v0/internal/tracing/tracer_test.go
index f811ca0446..0bf6bedc56 100644
--- a/contrib/segmentio/kafka.go.v0/option_test.go
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/tracer_test.go
@@ -3,7 +3,7 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
-package kafka
+package tracing
import (
"math"
@@ -16,7 +16,7 @@ import (
func TestAnalyticsSettings(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
- cfg := newConfig()
+ cfg := NewTracer(KafkaConfig{})
assert.True(t, math.IsNaN(cfg.analyticsRate))
})
@@ -26,12 +26,12 @@ func TestAnalyticsSettings(t *testing.T) {
defer globalconfig.SetAnalyticsRate(rate)
globalconfig.SetAnalyticsRate(0.4)
- cfg := newConfig()
+ cfg := NewTracer(KafkaConfig{})
assert.Equal(t, 0.4, cfg.analyticsRate)
})
t.Run("enabled", func(t *testing.T) {
- cfg := newConfig(WithAnalytics(true))
+ cfg := NewTracer(KafkaConfig{}, WithAnalytics(true))
assert.Equal(t, 1.0, cfg.analyticsRate)
})
@@ -40,19 +40,19 @@ func TestAnalyticsSettings(t *testing.T) {
defer globalconfig.SetAnalyticsRate(rate)
globalconfig.SetAnalyticsRate(0.4)
- cfg := newConfig(WithAnalyticsRate(0.2))
+ cfg := NewTracer(KafkaConfig{}, WithAnalyticsRate(0.2))
assert.Equal(t, 0.2, cfg.analyticsRate)
})
t.Run("withEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "true")
- cfg := newConfig()
+ cfg := NewTracer(KafkaConfig{})
assert.True(t, cfg.dataStreamsEnabled)
})
t.Run("optionOverridesEnv", func(t *testing.T) {
t.Setenv("DD_DATA_STREAMS_ENABLED", "false")
- cfg := newConfig()
+ cfg := NewTracer(KafkaConfig{})
WithDataStreams()(cfg)
assert.True(t, cfg.dataStreamsEnabled)
})
diff --git a/contrib/segmentio/kafka.go.v0/internal/tracing/tracing.go b/contrib/segmentio/kafka.go.v0/internal/tracing/tracing.go
new file mode 100644
index 0000000000..9b5f7bbb9b
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/tracing.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+// Package tracing contains tracing logic for the segmentio/kafka-go.v0 instrumentation.
+//
+// WARNING: this package SHOULD NOT import segmentio/kafka-go.
+//
+// The motivation of this package is to support orchestrion, which cannot use the main package because it imports
+// the segmentio/kafka-go package, and since orchestrion modifies the library code itself,
+// this would cause an import cycle.
+package tracing
+
+import (
+ "context"
+ "math"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
+)
+
+const componentName = "segmentio/kafka.go.v0"
+
+func init() {
+ telemetry.LoadIntegration(componentName)
+ tracer.MarkIntegrationImported("github.com/segmentio/kafka-go")
+}
+
+func (tr *Tracer) StartConsumeSpan(ctx context.Context, msg Message) ddtrace.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.consumerServiceName),
+ tracer.ResourceName("Consume Topic " + msg.GetTopic()),
+ tracer.SpanType(ext.SpanTypeMessageConsumer),
+ tracer.Tag(ext.MessagingKafkaPartition, msg.GetPartition()),
+ tracer.Tag("offset", msg.GetOffset()),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Tag(ext.KafkaBootstrapServers, tr.kafkaCfg.BootstrapServers),
+ tracer.Measured(),
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ // kafka supports headers, so try to extract a span context
+ carrier := NewMessageCarrier(msg)
+ if spanctx, err := tracer.Extract(carrier); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, _ := tracer.StartSpanFromContext(ctx, tr.consumerSpanName, opts...)
+ // reinject the span context so consumers can pick it up
+ if err := tracer.Inject(span.Context(), carrier); err != nil {
+ log.Debug("contrib/segmentio/kafka.go.v0: Failed to inject span context into carrier in reader, %v", err)
+ }
+ return span
+}
+
+func (tr *Tracer) StartProduceSpan(ctx context.Context, writer Writer, msg Message, spanOpts ...tracer.StartSpanOption) ddtrace.Span {
+ opts := []tracer.StartSpanOption{
+ tracer.ServiceName(tr.producerServiceName),
+ tracer.SpanType(ext.SpanTypeMessageProducer),
+ tracer.Tag(ext.Component, componentName),
+ tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
+ tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
+ tracer.Tag(ext.KafkaBootstrapServers, tr.kafkaCfg.BootstrapServers),
+ }
+ if writer.GetTopic() != "" {
+ opts = append(opts, tracer.ResourceName("Produce Topic "+writer.GetTopic()))
+ } else {
+ opts = append(opts, tracer.ResourceName("Produce Topic "+msg.GetTopic()))
+ }
+ if !math.IsNaN(tr.analyticsRate) {
+ opts = append(opts, tracer.Tag(ext.EventSampleRate, tr.analyticsRate))
+ }
+ opts = append(opts, spanOpts...)
+ carrier := NewMessageCarrier(msg)
+ span, _ := tracer.StartSpanFromContext(ctx, tr.producerSpanName, opts...)
+ if err := tracer.Inject(span.Context(), carrier); err != nil {
+ log.Debug("contrib/segmentio/kafka.go.v0: Failed to inject span context into carrier in writer, %v", err)
+ }
+ return span
+}
+
+func (*Tracer) FinishProduceSpan(span ddtrace.Span, partition int, offset int64, err error) {
+ span.SetTag(ext.MessagingKafkaPartition, partition)
+ span.SetTag("offset", offset)
+ span.Finish(tracer.WithError(err))
+}
diff --git a/contrib/segmentio/kafka.go.v0/internal/tracing/types.go b/contrib/segmentio/kafka.go.v0/internal/tracing/types.go
new file mode 100644
index 0000000000..6c17b179f4
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/internal/tracing/types.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package tracing
+
+type Header interface {
+ GetKey() string
+ GetValue() []byte
+}
+
+type KafkaHeader struct {
+ Key string
+ Value []byte
+}
+
+func (h KafkaHeader) GetKey() string {
+ return h.Key
+}
+
+func (h KafkaHeader) GetValue() []byte {
+ return h.Value
+}
+
+type Writer interface {
+ GetTopic() string
+}
+
+type Message interface {
+ GetValue() []byte
+ GetKey() []byte
+ GetHeaders() []Header
+ SetHeaders([]Header)
+ GetTopic() string
+ GetPartition() int
+ GetOffset() int64
+}
+
+// KafkaConfig holds information from the kafka config for span tags.
+type KafkaConfig struct {
+ BootstrapServers string
+ ConsumerGroupID string
+}
diff --git a/contrib/segmentio/kafka.go.v0/kafka.go b/contrib/segmentio/kafka.go.v0/kafka.go
index ffd881dfa1..93af2643af 100644
--- a/contrib/segmentio/kafka.go.v0/kafka.go
+++ b/contrib/segmentio/kafka.go.v0/kafka.go
@@ -7,25 +7,21 @@ package kafka // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka
import (
"context"
- "math"
"strings"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams"
- "gopkg.in/DataDog/dd-trace-go.v1/datastreams/options"
+ "github.com/segmentio/kafka-go"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
-
- "github.com/segmentio/kafka-go"
)
-const componentName = "segmentio/kafka.go.v0"
-
-func init() {
- telemetry.LoadIntegration(componentName)
- tracer.MarkIntegrationImported("github.com/segmentio/kafka-go")
+// A Reader wraps a kafka.Reader.
+type Reader struct {
+ *kafka.Reader
+ tracer *tracing.Tracer
+ kafkaCfg *tracing.KafkaConfig
+ prev ddtrace.Span
}
// NewReader calls kafka.NewReader and wraps the resulting Consumer.
@@ -33,74 +29,23 @@ func NewReader(conf kafka.ReaderConfig, opts ...Option) *Reader {
return WrapReader(kafka.NewReader(conf), opts...)
}
-// NewWriter calls kafka.NewWriter and wraps the resulting Producer.
-func NewWriter(conf kafka.WriterConfig, opts ...Option) *Writer {
- return WrapWriter(kafka.NewWriter(conf), opts...)
-}
-
// WrapReader wraps a kafka.Reader so that any consumed events are traced.
func WrapReader(c *kafka.Reader, opts ...Option) *Reader {
wrapped := &Reader{
Reader: c,
- cfg: newConfig(opts...),
}
-
+ kafkaCfg := tracing.KafkaConfig{}
if c.Config().Brokers != nil {
- wrapped.bootstrapServers = strings.Join(c.Config().Brokers, ",")
+ kafkaCfg.BootstrapServers = strings.Join(c.Config().Brokers, ",")
}
-
if c.Config().GroupID != "" {
- wrapped.groupID = c.Config().GroupID
+ kafkaCfg.ConsumerGroupID = c.Config().GroupID
}
-
- log.Debug("contrib/segmentio/kafka-go.v0/kafka: Wrapping Reader: %#v", wrapped.cfg)
+ wrapped.tracer = tracing.NewTracer(kafkaCfg, opts...)
+ log.Debug("contrib/segmentio/kafka-go.v0/kafka: Wrapping Reader: %#v", wrapped.tracer)
return wrapped
}
-// A kafkaConfig struct holds information from the kafka config for span tags
-type kafkaConfig struct {
- bootstrapServers string
- groupID string
-}
-
-// A Reader wraps a kafka.Reader.
-type Reader struct {
- *kafka.Reader
- kafkaConfig
- cfg *config
- prev ddtrace.Span
-}
-
-func (r *Reader) startSpan(ctx context.Context, msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(r.cfg.consumerServiceName),
- tracer.ResourceName("Consume Topic " + msg.Topic),
- tracer.SpanType(ext.SpanTypeMessageConsumer),
- tracer.Tag(ext.MessagingKafkaPartition, msg.Partition),
- tracer.Tag("offset", msg.Offset),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindConsumer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.KafkaBootstrapServers, r.bootstrapServers),
- tracer.Measured(),
- }
-
- if !math.IsNaN(r.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, r.cfg.analyticsRate))
- }
- // kafka supports headers, so try to extract a span context
- carrier := messageCarrier{msg}
- if spanctx, err := tracer.Extract(carrier); err == nil {
- opts = append(opts, tracer.ChildOf(spanctx))
- }
- span, _ := tracer.StartSpanFromContext(ctx, r.cfg.consumerSpanName, opts...)
- // reinject the span context so consumers can pick it up
- if err := tracer.Inject(span.Context(), carrier); err != nil {
- log.Debug("contrib/segmentio/kafka.go.v0: Failed to inject span context into carrier in reader, %v", err)
- }
- return span
-}
-
// Close calls the underlying Reader.Close and if polling is enabled, finishes
// any remaining span.
func (r *Reader) Close() error {
@@ -122,8 +67,9 @@ func (r *Reader) ReadMessage(ctx context.Context) (kafka.Message, error) {
if err != nil {
return kafka.Message{}, err
}
- r.prev = r.startSpan(ctx, &msg)
- setConsumeCheckpoint(r.cfg.dataStreamsEnabled, r.groupID, &msg)
+ tMsg := wrapMessage(&msg)
+ r.prev = r.tracer.StartConsumeSpan(ctx, tMsg)
+ r.tracer.SetConsumeDSMCheckpoint(tMsg)
return msg, nil
}
@@ -137,147 +83,51 @@ func (r *Reader) FetchMessage(ctx context.Context) (kafka.Message, error) {
if err != nil {
return msg, err
}
- r.prev = r.startSpan(ctx, &msg)
- setConsumeCheckpoint(r.cfg.dataStreamsEnabled, r.groupID, &msg)
+ tMsg := wrapMessage(&msg)
+ r.prev = r.tracer.StartConsumeSpan(ctx, tMsg)
+ r.tracer.SetConsumeDSMCheckpoint(tMsg)
return msg, nil
}
-func setConsumeCheckpoint(enabled bool, groupID string, msg *kafka.Message) {
- if !enabled || msg == nil {
- return
- }
- edges := []string{"direction:in", "topic:" + msg.Topic, "type:kafka"}
- if groupID != "" {
- edges = append(edges, "group:"+groupID)
- }
- carrier := messageCarrier{msg}
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
- datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
- options.CheckpointParams{PayloadSize: getConsumerMsgSize(msg)},
- edges...,
- )
- if !ok {
- return
- }
- datastreams.InjectToBase64Carrier(ctx, carrier)
- if groupID != "" {
- // only track Kafka lag if a consumer group is set.
- // since there is no ack mechanism, we consider that messages read are committed right away.
- tracer.TrackKafkaCommitOffset(groupID, msg.Topic, int32(msg.Partition), msg.Offset)
- }
+// Writer wraps a kafka.Writer with tracing config data
+type Writer struct {
+ *kafka.Writer
+ tracer *tracing.Tracer
+}
+
+// NewWriter calls kafka.NewWriter and wraps the resulting Producer.
+func NewWriter(conf kafka.WriterConfig, opts ...Option) *Writer {
+ return WrapWriter(kafka.NewWriter(conf), opts...)
}
// WrapWriter wraps a kafka.Writer so requests are traced.
func WrapWriter(w *kafka.Writer, opts ...Option) *Writer {
writer := &Writer{
Writer: w,
- cfg: newConfig(opts...),
}
-
+ kafkaCfg := tracing.KafkaConfig{}
if w.Addr.String() != "" {
- writer.bootstrapServers = w.Addr.String()
+ kafkaCfg.BootstrapServers = w.Addr.String()
}
- log.Debug("contrib/segmentio/kafka.go.v0: Wrapping Writer: %#v", writer.cfg)
+ writer.tracer = tracing.NewTracer(kafkaCfg, opts...)
+ log.Debug("contrib/segmentio/kafka.go.v0: Wrapping Writer: %#v", writer.tracer)
return writer
}
-// Writer wraps a kafka.Writer with tracing config data
-type Writer struct {
- *kafka.Writer
- kafkaConfig
- cfg *config
-}
-
-func (w *Writer) startSpan(ctx context.Context, msg *kafka.Message) ddtrace.Span {
- opts := []tracer.StartSpanOption{
- tracer.ServiceName(w.cfg.producerServiceName),
- tracer.SpanType(ext.SpanTypeMessageProducer),
- tracer.Tag(ext.Component, componentName),
- tracer.Tag(ext.SpanKind, ext.SpanKindProducer),
- tracer.Tag(ext.MessagingSystem, ext.MessagingSystemKafka),
- tracer.Tag(ext.KafkaBootstrapServers, w.bootstrapServers),
- }
- if w.Writer.Topic != "" {
- opts = append(opts, tracer.ResourceName("Produce Topic "+w.Writer.Topic))
- } else {
- opts = append(opts, tracer.ResourceName("Produce Topic "+msg.Topic))
- }
- if !math.IsNaN(w.cfg.analyticsRate) {
- opts = append(opts, tracer.Tag(ext.EventSampleRate, w.cfg.analyticsRate))
- }
- carrier := messageCarrier{msg}
- span, _ := tracer.StartSpanFromContext(ctx, w.cfg.producerSpanName, opts...)
- if err := tracer.Inject(span.Context(), carrier); err != nil {
- log.Debug("contrib/segmentio/kafka.go.v0: Failed to inject span context into carrier in writer, %v", err)
- }
- return span
-}
-
-func finishSpan(span ddtrace.Span, partition int, offset int64, err error) {
- span.SetTag(ext.MessagingKafkaPartition, partition)
- span.SetTag("offset", offset)
- span.Finish(tracer.WithError(err))
-}
-
// WriteMessages calls kafka.go.v0.Writer.WriteMessages and traces the requests.
func (w *Writer) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
// although there's only one call made to the SyncProducer, the messages are
// treated individually, so we create a span for each one
spans := make([]ddtrace.Span, len(msgs))
for i := range msgs {
- spans[i] = w.startSpan(ctx, &msgs[i])
- setProduceCheckpoint(w.cfg.dataStreamsEnabled, &msgs[i], w.Writer)
+ tMsg := wrapMessage(&msgs[i])
+ tWriter := wrapTracingWriter(w.Writer)
+ spans[i] = w.tracer.StartProduceSpan(ctx, tWriter, tMsg)
+ w.tracer.SetProduceDSMCheckpoint(tMsg, tWriter)
}
err := w.Writer.WriteMessages(ctx, msgs...)
for i, span := range spans {
- finishSpan(span, msgs[i].Partition, msgs[i].Offset, err)
+ w.tracer.FinishProduceSpan(span, msgs[i].Partition, msgs[i].Offset, err)
}
return err
}
-
-func setProduceCheckpoint(enabled bool, msg *kafka.Message, writer *kafka.Writer) {
- if !enabled || msg == nil {
- return
- }
-
- var topic string
- if writer.Topic != "" {
- topic = writer.Topic
- } else {
- topic = msg.Topic
- }
-
- edges := []string{"direction:out", "topic:" + topic, "type:kafka"}
- carrier := messageCarrier{msg}
- ctx, ok := tracer.SetDataStreamsCheckpointWithParams(
- datastreams.ExtractFromBase64Carrier(context.Background(), carrier),
- options.CheckpointParams{PayloadSize: getProducerMsgSize(msg)},
- edges...,
- )
- if !ok {
- return
- }
-
- // Headers will be dropped if the current protocol does not support them
- datastreams.InjectToBase64Carrier(ctx, carrier)
-}
-
-func getProducerMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- if msg.Value != nil {
- size += int64(len(msg.Value))
- }
- if msg.Key != nil {
- size += int64(len(msg.Key))
- }
- return size
-}
-
-func getConsumerMsgSize(msg *kafka.Message) (size int64) {
- for _, header := range msg.Headers {
- size += int64(len(header.Key) + len(header.Value))
- }
- return size + int64(len(msg.Value)+len(msg.Key))
-}
diff --git a/contrib/segmentio/kafka.go.v0/kafka_test.go b/contrib/segmentio/kafka.go.v0/kafka_test.go
index 5d33859f7b..ab4e7839c9 100644
--- a/contrib/segmentio/kafka.go.v0/kafka_test.go
+++ b/contrib/segmentio/kafka.go.v0/kafka_test.go
@@ -7,19 +7,26 @@ package kafka
import (
"context"
+ "errors"
+ "fmt"
+ "log"
+ "net"
"os"
+ "strconv"
"testing"
"time"
+ "github.com/segmentio/kafka-go"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/namingschematest"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0/internal/tracing"
"gopkg.in/DataDog/dd-trace-go.v1/datastreams"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
-
- kafka "github.com/segmentio/kafka-go"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
const (
@@ -28,44 +35,139 @@ const (
testReaderMaxWait = 10 * time.Millisecond
)
-func skipIntegrationTest(t *testing.T) {
- if _, ok := os.LookupEnv("INTEGRATION"); !ok {
- t.Skip("🚧 Skipping integration test (INTEGRATION environment variable is not set)")
+var (
+ // add some dummy values to broker/addr to test bootstrap servers.
+ kafkaBrokers = []string{"localhost:9092", "localhost:9093", "localhost:9094"}
+)
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ log.Println("🚧 Skipping integration test (INTEGRATION environment variable is not set)")
+ os.Exit(0)
}
+ cleanup := createTopic()
+ exitCode := m.Run()
+ cleanup()
+ os.Exit(exitCode)
}
-/*
-to setup the integration test locally run:
- docker-compose -f local_testing.yaml up
-*/
+func testWriter() *kafka.Writer {
+ return &kafka.Writer{
+ Addr: kafka.TCP(kafkaBrokers...),
+ Topic: testTopic,
+ RequiredAcks: kafka.RequireOne,
+ Balancer: &kafka.LeastBytes{},
+ }
+}
+
+func testReader() *kafka.Reader {
+ return kafka.NewReader(kafka.ReaderConfig{
+ Brokers: kafkaBrokers,
+ GroupID: testGroupID,
+ Topic: testTopic,
+ MaxWait: testReaderMaxWait,
+ MaxBytes: 10e6, // 10MB
+ })
+}
+
+func createTopic() func() {
+ conn, err := kafka.Dial("tcp", "localhost:9092")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+
+ controller, err := conn.Controller()
+ if err != nil {
+ log.Fatal(err)
+ }
+ controllerConn, err := kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := controllerConn.DeleteTopics(testTopic); err != nil && !errors.Is(err, kafka.UnknownTopicOrPartition) {
+ log.Fatalf("failed to delete topic: %v", err)
+ }
+ topicConfigs := []kafka.TopicConfig{
+ {
+ Topic: testTopic,
+ NumPartitions: 1,
+ ReplicationFactor: 1,
+ },
+ }
+ if err := controllerConn.CreateTopics(topicConfigs...); err != nil {
+ log.Fatal(err)
+ }
+ if err := ensureTopicReady(); err != nil {
+ log.Fatal(err)
+ }
+ return func() {
+ if err := controllerConn.DeleteTopics(testTopic); err != nil {
+ log.Printf("failed to delete topic: %v", err)
+ }
+ if err := controllerConn.Close(); err != nil {
+ log.Printf("failed to close controller connection: %v", err)
+ }
+ }
+}
+
+func ensureTopicReady() error {
+ const (
+ maxRetries = 10
+ retryDelay = 100 * time.Millisecond
+ )
+ writer := testWriter()
+ defer writer.Close()
+ reader := testReader()
+ defer reader.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ var (
+ retryCount int
+ err error
+ )
+ for retryCount < maxRetries {
+ err = writer.WriteMessages(ctx, kafka.Message{Key: []byte("some-key"), Value: []byte("some-value")})
+ if err == nil {
+ break
+ }
+ // This error happens sometimes with brand-new topics, as there is a delay between when the topic is created
+ // on the broker, and when the topic can actually be written to.
+ if errors.Is(err, kafka.UnknownTopicOrPartition) {
+ retryCount++
+ log.Printf("topic not ready yet, retrying produce in %s (retryCount: %d)\n", retryDelay, retryCount)
+ time.Sleep(retryDelay)
+ }
+ }
+ if err != nil {
+ return fmt.Errorf("timeout waiting for topic to be ready: %w", err)
+ }
+ // read the message to ensure we don't pollute tests
+ _, err = reader.ReadMessage(ctx)
+ if err != nil {
+ return err
+ }
+ return nil
+}
type readerOpFn func(t *testing.T, r *Reader)
func genIntegrationTestSpans(t *testing.T, mt mocktracer.Tracer, writerOp func(t *testing.T, w *Writer), readerOp readerOpFn, writerOpts []Option, readerOpts []Option) ([]mocktracer.Span, []kafka.Message) {
- skipIntegrationTest(t)
-
writtenMessages := []kafka.Message{}
- // add some dummy values to broker/addr to test bootstrap servers.
- kw := &kafka.Writer{
- Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
- Topic: testTopic,
- RequiredAcks: kafka.RequireOne,
- Completion: func(messages []kafka.Message, err error) {
- writtenMessages = append(writtenMessages, messages...)
- },
+ kw := testWriter()
+ kw.Completion = func(messages []kafka.Message, err error) {
+ writtenMessages = append(writtenMessages, messages...)
}
w := WrapWriter(kw, writerOpts...)
writerOp(t, w)
err := w.Close()
require.NoError(t, err)
- r := NewReader(kafka.ReaderConfig{
- Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
- GroupID: testGroupID,
- Topic: testTopic,
- MaxWait: testReaderMaxWait,
- }, readerOpts...)
+ r := WrapReader(testReader(), readerOpts...)
readerOp(t, r)
err = r.Close()
require.NoError(t, err)
@@ -113,8 +215,8 @@ func TestReadMessageFunctional(t *testing.T) {
[]Option{WithDataStreams()},
)
- assert.Len(t, writtenMessages, len(messagesToWrite))
- assert.Len(t, readMessages, len(messagesToWrite))
+ require.Len(t, writtenMessages, len(messagesToWrite))
+ require.Len(t, readMessages, len(messagesToWrite))
// producer span
s0 := spans[0]
@@ -129,7 +231,7 @@ func TestReadMessageFunctional(t *testing.T) {
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s0.Tag(ext.KafkaBootstrapServers))
- p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}))
+ p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&writtenMessages[0]))))
assert.True(t, ok)
expectedCtx, _ := tracer.SetDataStreamsCheckpoint(context.Background(), "direction:out", "topic:"+testTopic, "type:kafka")
expected, _ := datastreams.PathwayFromContext(expectedCtx)
@@ -149,10 +251,14 @@ func TestReadMessageFunctional(t *testing.T) {
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s1.Tag(ext.KafkaBootstrapServers))
- p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&readMessages[0]}))
+ // context propagation
+ assert.Equal(t, s0.SpanID(), s1.ParentID(), "consume span should be child of the produce span")
+ assert.Equal(t, s0.TraceID(), s1.TraceID(), "spans should have the same trace id")
+
+ p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&readMessages[0]))))
assert.True(t, ok)
expectedCtx, _ = tracer.SetDataStreamsCheckpoint(
- datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}),
+ datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&writtenMessages[0]))),
"direction:in", "topic:"+testTopic, "type:kafka", "group:"+testGroupID,
)
expected, _ = datastreams.PathwayFromContext(expectedCtx)
@@ -209,7 +315,7 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, "kafka", s0.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s0.Tag(ext.KafkaBootstrapServers))
- p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}))
+ p, ok := datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&writtenMessages[0]))))
assert.True(t, ok)
expectedCtx, _ := tracer.SetDataStreamsCheckpoint(context.Background(), "direction:out", "topic:"+testTopic, "type:kafka")
expected, _ := datastreams.PathwayFromContext(expectedCtx)
@@ -229,10 +335,13 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, "kafka", s1.Tag(ext.MessagingSystem))
assert.Equal(t, "localhost:9092,localhost:9093,localhost:9094", s1.Tag(ext.KafkaBootstrapServers))
- p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&readMessages[0]}))
+ // context propagation
+ assert.Equal(t, s0.SpanID(), s1.ParentID(), "consume span should be child of the produce span")
+
+ p, ok = datastreams.PathwayFromContext(datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&readMessages[0]))))
assert.True(t, ok)
expectedCtx, _ = tracer.SetDataStreamsCheckpoint(
- datastreams.ExtractFromBase64Carrier(context.Background(), messageCarrier{&writtenMessages[0]}),
+ datastreams.ExtractFromBase64Carrier(context.Background(), tracing.NewMessageCarrier(wrapMessage(&writtenMessages[0]))),
"direction:in", "topic:"+testTopic, "type:kafka", "group:"+testGroupID,
)
expected, _ = datastreams.PathwayFromContext(expectedCtx)
@@ -240,6 +349,62 @@ func TestFetchMessageFunctional(t *testing.T) {
assert.Equal(t, expected.GetHash(), p.GetHash())
}
+func TestProduceMultipleMessages(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ messages := []kafka.Message{
+ {
+ Key: []byte("key1"),
+ Value: []byte("value1"),
+ },
+ {
+ Key: []byte("key2"),
+ Value: []byte("value2"),
+ },
+ {
+ Key: []byte("key3"),
+ Value: []byte("value3"),
+ },
+ }
+
+ writer := WrapWriter(testWriter())
+ reader := WrapReader(testReader())
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ err := writer.WriteMessages(ctx, messages...)
+ require.NoError(t, err)
+ require.NoError(t, writer.Close())
+
+ curMsg := 0
+ for curMsg < len(messages) {
+ readMsg, err := reader.ReadMessage(ctx)
+ require.NoError(t, err)
+ require.Equal(t, string(messages[curMsg].Key), string(readMsg.Key))
+ require.Equal(t, string(messages[curMsg].Value), string(readMsg.Value))
+ curMsg++
+ }
+ require.NoError(t, reader.Close())
+
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 6)
+
+ produceSpans := spans[0:3]
+ consumeSpans := spans[3:6]
+ for i := 0; i < 3; i++ {
+ ps := produceSpans[i]
+ cs := consumeSpans[i]
+
+ assert.Equal(t, "kafka.produce", ps.OperationName(), "wrong produce span name")
+ assert.Equal(t, "kafka.consume", cs.OperationName(), "wrong consume span name")
+ assert.Equal(t, cs.ParentID(), ps.SpanID(), "consume span should be child of a produce span")
+ assert.Equal(t, uint64(0), ps.ParentID(), "produce span should not be child of any span")
+ assert.Equal(t, cs.TraceID(), ps.TraceID(), "spans should be part of the same trace")
+ }
+}
+
func TestNamingSchema(t *testing.T) {
genSpans := func(t *testing.T, serviceOverride string) []mocktracer.Span {
var opts []Option
@@ -282,40 +447,50 @@ func TestNamingSchema(t *testing.T) {
namingschematest.NewKafkaTest(genSpans)(t)
}
-func BenchmarkReaderStartSpan(b *testing.B) {
- r := NewReader(kafka.ReaderConfig{
- Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"},
- GroupID: testGroupID,
- Topic: testTopic,
- MaxWait: testReaderMaxWait,
- })
+// benchSpan is a package-level variable used to prevent compiler optimisations in the benchmarks below.
+var benchSpan ddtrace.Span
+func BenchmarkReaderStartSpan(b *testing.B) {
+ ctx := context.Background()
+ kafkaCfg := tracing.KafkaConfig{
+ BootstrapServers: "localhost:9092,localhost:9093,localhost:9094",
+ ConsumerGroupID: testGroupID,
+ }
+ tr := tracing.NewTracer(kafkaCfg)
msg := kafka.Message{
Key: []byte("key1"),
Value: []byte("value1"),
}
+ var result ddtrace.Span
b.ResetTimer()
for n := 0; n < b.N; n++ {
- r.startSpan(nil, &msg)
+ result = tr.StartConsumeSpan(ctx, wrapMessage(&msg))
}
+ benchSpan = result
}
func BenchmarkWriterStartSpan(b *testing.B) {
+ ctx := context.Background()
+ kafkaCfg := tracing.KafkaConfig{
+ BootstrapServers: "localhost:9092,localhost:9093,localhost:9094",
+ ConsumerGroupID: testGroupID,
+ }
+ tr := tracing.NewTracer(kafkaCfg)
kw := &kafka.Writer{
Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
Topic: testTopic,
RequiredAcks: kafka.RequireOne,
}
- w := WrapWriter(kw)
-
msg := kafka.Message{
Key: []byte("key1"),
Value: []byte("value1"),
}
+ var result ddtrace.Span
b.ResetTimer()
for n := 0; n < b.N; n++ {
- w.startSpan(nil, &msg)
+ result = tr.StartProduceSpan(ctx, wrapTracingWriter(kw), wrapMessage(&msg))
}
+ benchSpan = result
}
diff --git a/contrib/segmentio/kafka.go.v0/local_testing.yaml b/contrib/segmentio/kafka.go.v0/local_testing.yaml
deleted file mode 100644
index 0cc5c8a355..0000000000
--- a/contrib/segmentio/kafka.go.v0/local_testing.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-version: "3"
-services:
- zookeeper:
- image: 'bitnami/zookeeper:latest'
- ports:
- - '2181:2181'
- environment:
- - ALLOW_ANONYMOUS_LOGIN=yes
- kafka:
- image: 'bitnami/kafka:2'
- ports:
- - '9092:9092'
- environment:
- - KAFKA_BROKER_ID=1
- - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
- - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
- - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- - ALLOW_PLAINTEXT_LISTENER=yes
- depends_on:
- - zookeeper
- create-topics:
- image: confluentinc/cp-kafka:5.5.0
- hostname: create-topics
- container_name: create-topics
- restart: on-failure
- depends_on:
- - kafka
- command: "
- bash -c 'sleep 5 && \
- kafka-topics --create --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic gosegtest'"
- environment:
- KAFKA_BROKER_ID: ignored
- KAFKA_ZOOKEEPER_CONNECT: ignored
diff --git a/contrib/segmentio/kafka.go.v0/option.go b/contrib/segmentio/kafka.go.v0/option.go
index 8b14b7aab9..b31d24e2d5 100644
--- a/contrib/segmentio/kafka.go.v0/option.go
+++ b/contrib/segmentio/kafka.go.v0/option.go
@@ -6,82 +6,21 @@
package kafka
import (
- "math"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/namingschema"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0/internal/tracing"
)
-const defaultServiceName = "kafka"
-
-type config struct {
- consumerServiceName string
- producerServiceName string
- consumerSpanName string
- producerSpanName string
- analyticsRate float64
- dataStreamsEnabled bool
-}
-
// An Option customizes the config.
-type Option func(cfg *config)
-
-func newConfig(opts ...Option) *config {
- cfg := &config{
- // analyticsRate: globalconfig.AnalyticsRate(),
- analyticsRate: math.NaN(),
- }
- if internal.BoolEnv("DD_TRACE_KAFKA_ANALYTICS_ENABLED", false) {
- cfg.analyticsRate = 1.0
- }
-
- cfg.dataStreamsEnabled = internal.BoolEnv("DD_DATA_STREAMS_ENABLED", false)
-
- cfg.consumerServiceName = namingschema.ServiceName(defaultServiceName)
- cfg.producerServiceName = namingschema.ServiceNameOverrideV0(defaultServiceName, defaultServiceName)
- cfg.consumerSpanName = namingschema.OpName(namingschema.KafkaInbound)
- cfg.producerSpanName = namingschema.OpName(namingschema.KafkaOutbound)
-
- for _, opt := range opts {
- opt(cfg)
- }
- return cfg
-}
+type Option = tracing.Option
// WithServiceName sets the config service name to serviceName.
-func WithServiceName(serviceName string) Option {
- return func(cfg *config) {
- cfg.consumerServiceName = serviceName
- cfg.producerServiceName = serviceName
- }
-}
+var WithServiceName = tracing.WithServiceName
// WithAnalytics enables Trace Analytics for all started spans.
-func WithAnalytics(on bool) Option {
- return func(cfg *config) {
- if on {
- cfg.analyticsRate = 1.0
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalytics = tracing.WithAnalytics
// WithAnalyticsRate sets the sampling rate for Trace Analytics events
// correlated to started spans.
-func WithAnalyticsRate(rate float64) Option {
- return func(cfg *config) {
- if rate >= 0.0 && rate <= 1.0 {
- cfg.analyticsRate = rate
- } else {
- cfg.analyticsRate = math.NaN()
- }
- }
-}
+var WithAnalyticsRate = tracing.WithAnalyticsRate
// WithDataStreams enables the Data Streams monitoring product features: https://www.datadoghq.com/product/data-streams-monitoring/
-func WithDataStreams() Option {
- return func(cfg *config) {
- cfg.dataStreamsEnabled = true
- }
-}
+var WithDataStreams = tracing.WithDataStreams
diff --git a/contrib/segmentio/kafka.go.v0/tracing.go b/contrib/segmentio/kafka.go.v0/tracing.go
new file mode 100644
index 0000000000..fdf1e84b9b
--- /dev/null
+++ b/contrib/segmentio/kafka.go.v0/tracing.go
@@ -0,0 +1,90 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016 Datadog, Inc.
+
+package kafka
+
+import (
+ "github.com/segmentio/kafka-go"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/segmentio/kafka.go.v0/internal/tracing"
+)
+
+type wMessage struct {
+ *kafka.Message
+}
+
+func wrapMessage(msg *kafka.Message) tracing.Message {
+ if msg == nil {
+ return nil
+ }
+ return &wMessage{msg}
+}
+
+func (w *wMessage) GetValue() []byte {
+ return w.Value
+}
+
+func (w *wMessage) GetKey() []byte {
+ return w.Key
+}
+
+func (w *wMessage) GetHeaders() []tracing.Header {
+ hs := make([]tracing.Header, 0, len(w.Headers))
+ for _, h := range w.Headers {
+ hs = append(hs, wrapHeader(h))
+ }
+ return hs
+}
+
+func (w *wMessage) SetHeaders(headers []tracing.Header) {
+ hs := make([]kafka.Header, 0, len(headers))
+ for _, h := range headers {
+ hs = append(hs, kafka.Header{
+ Key: h.GetKey(),
+ Value: h.GetValue(),
+ })
+ }
+ w.Message.Headers = hs
+}
+
+func (w *wMessage) GetTopic() string {
+ return w.Topic
+}
+
+func (w *wMessage) GetPartition() int {
+ return w.Partition
+}
+
+func (w *wMessage) GetOffset() int64 {
+ return w.Offset
+}
+
+type wHeader struct {
+ kafka.Header
+}
+
+func wrapHeader(h kafka.Header) tracing.Header {
+ return &wHeader{h}
+}
+
+func (w wHeader) GetKey() string {
+ return w.Key
+}
+
+func (w wHeader) GetValue() []byte {
+ return w.Value
+}
+
+type wWriter struct {
+ *kafka.Writer
+}
+
+func (w *wWriter) GetTopic() string {
+ return w.Topic
+}
+
+func wrapTracingWriter(w *kafka.Writer) tracing.Writer {
+ return &wWriter{w}
+}
diff --git a/ddtrace/mocktracer/main_test.go b/ddtrace/mocktracer/main_test.go
new file mode 100644
index 0000000000..d584962bf7
--- /dev/null
+++ b/ddtrace/mocktracer/main_test.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package mocktracer
+
+import (
+ "go.uber.org/goleak"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ goleak.VerifyTestMain(m)
+}
diff --git a/ddtrace/mocktracer/mockspan_test.go b/ddtrace/mocktracer/mockspan_test.go
index ad289434f4..9d849ac82e 100644
--- a/ddtrace/mocktracer/mockspan_test.go
+++ b/ddtrace/mocktracer/mockspan_test.go
@@ -201,8 +201,10 @@ func TestSpanString(t *testing.T) {
}
func TestSpanWithID(t *testing.T) {
+ tr := newMockTracer()
+ defer tr.Stop()
spanID := uint64(123456789)
- span := newMockTracer().StartSpan("", tracer.WithSpanID(spanID))
+ span := tr.StartSpan("", tracer.WithSpanID(spanID))
assert := assert.New(t)
assert.Equal(spanID, span.Context().SpanID())
@@ -243,6 +245,8 @@ func TestSetUser(t *testing.T) {
t.Run("nested", func(t *testing.T) {
tr := newMockTracer()
+ defer tr.Stop()
+
s0 := tr.StartSpan("root operation")
s1 := tr.StartSpan("nested operation", tracer.ChildOf(s0.Context()))
s2 := tr.StartSpan("nested nested operation", tracer.ChildOf(s1.Context()))
diff --git a/ddtrace/mocktracer/mocktracer.go b/ddtrace/mocktracer/mocktracer.go
index 835a3e1802..2a210e07bb 100644
--- a/ddtrace/mocktracer/mocktracer.go
+++ b/ddtrace/mocktracer/mocktracer.go
@@ -87,9 +87,10 @@ func newMockTracer() *mocktracer {
}
// Stop deactivates the mock tracer and sets the active tracer to a no-op.
-func (*mocktracer) Stop() {
+func (t *mocktracer) Stop() {
internal.SetGlobalTracer(&internal.NoopTracer{})
internal.Testing = false
+ t.dsmProcessor.Stop()
}
func (t *mocktracer) StartSpan(operationName string, opts ...ddtrace.StartSpanOption) ddtrace.Span {
diff --git a/ddtrace/mocktracer/mocktracer_test.go b/ddtrace/mocktracer/mocktracer_test.go
index 8c863f60c5..fe2a2c229a 100644
--- a/ddtrace/mocktracer/mocktracer_test.go
+++ b/ddtrace/mocktracer/mocktracer_test.go
@@ -22,6 +22,8 @@ func TestStart(t *testing.T) {
if tt, ok := internal.GetGlobalTracer().(Tracer); !ok || tt != trc {
t.Fail()
}
+ // If the tracer isn't stopped, it leaks goroutines and breaks other tests.
+ trc.Stop()
}
func TestTracerStop(t *testing.T) {
@@ -37,6 +39,8 @@ func TestTracerStartSpan(t *testing.T) {
t.Run("with-service", func(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
parent := newSpan(mt, "http.request", &ddtrace.StartSpanConfig{Tags: parentTags})
s, ok := mt.StartSpan(
"db.query",
@@ -58,6 +62,8 @@ func TestTracerStartSpan(t *testing.T) {
t.Run("inherit", func(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
parent := newSpan(mt, "http.request", &ddtrace.StartSpanConfig{Tags: parentTags})
s, ok := mt.StartSpan("db.query", tracer.ChildOf(parent.Context())).(*mockspan)
@@ -74,6 +80,8 @@ func TestTracerStartSpan(t *testing.T) {
func TestTracerFinishedSpans(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
assert.Empty(t, mt.FinishedSpans())
parent := mt.StartSpan("http.request")
child := mt.StartSpan("db.query", tracer.ChildOf(parent.Context()))
@@ -96,6 +104,8 @@ func TestTracerFinishedSpans(t *testing.T) {
func TestTracerOpenSpans(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
assert.Empty(t, mt.OpenSpans())
parent := mt.StartSpan("http.request")
child := mt.StartSpan("db.query", tracer.ChildOf(parent.Context()))
@@ -114,6 +124,8 @@ func TestTracerOpenSpans(t *testing.T) {
func TestTracerSetUser(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
span := mt.StartSpan("http.request")
tracer.SetUser(span, "test-user",
tracer.WithUserEmail("email"),
@@ -139,6 +151,7 @@ func TestTracerSetUser(t *testing.T) {
func TestTracerReset(t *testing.T) {
assert := assert.New(t)
mt := newMockTracer()
+ defer mt.Stop()
span := mt.StartSpan("parent")
_ = mt.StartSpan("child", tracer.ChildOf(span.Context()))
@@ -157,6 +170,8 @@ func TestTracerReset(t *testing.T) {
func TestTracerInject(t *testing.T) {
t.Run("errors", func(t *testing.T) {
mt := newMockTracer()
+ defer mt.Stop()
+
assert := assert.New(t)
err := mt.Inject(&spanContext{}, 2)
diff --git a/ddtrace/tracer/civisibility_payload.go b/ddtrace/tracer/civisibility_payload.go
index df8ffc04cc..ce8cc0c2f9 100644
--- a/ddtrace/tracer/civisibility_payload.go
+++ b/ddtrace/tracer/civisibility_payload.go
@@ -10,7 +10,10 @@ import (
"sync/atomic"
"github.com/tinylib/msgp/msgp"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
)
@@ -46,6 +49,7 @@ func (p *ciVisibilityPayload) push(event *ciVisibilityEvent) error {
//
// A pointer to a newly initialized civisibilitypayload instance.
func newCiVisibilityPayload() *ciVisibilityPayload {
+ log.Debug("ciVisibilityPayload: creating payload instance")
return &ciVisibilityPayload{newPayload()}
}
@@ -61,6 +65,27 @@ func newCiVisibilityPayload() *ciVisibilityPayload {
// A pointer to a bytes.Buffer containing the encoded CI Visibility payload.
// An error if reading from the buffer or encoding the payload fails.
func (p *ciVisibilityPayload) getBuffer(config *config) (*bytes.Buffer, error) {
+ log.Debug("ciVisibilityPayload: .getBuffer (count: %v)", p.itemCount())
+
+ // Create a buffer to read the current payload
+ payloadBuf := new(bytes.Buffer)
+ if _, err := payloadBuf.ReadFrom(p.payload); err != nil {
+ return nil, err
+ }
+
+ // Create the visibility payload
+ visibilityPayload := p.writeEnvelope(config.env, payloadBuf.Bytes())
+
+ // Create a new buffer to encode the visibility payload in MessagePack format
+ encodedBuf := new(bytes.Buffer)
+ if err := msgp.Encode(encodedBuf, visibilityPayload); err != nil {
+ return nil, err
+ }
+
+ return encodedBuf, nil
+}
+
+func (p *ciVisibilityPayload) writeEnvelope(env string, events []byte) *ciTestCyclePayload {
/*
The Payload format in the CI Visibility protocol is like this:
@@ -82,36 +107,35 @@ func (p *ciVisibilityPayload) getBuffer(config *config) (*bytes.Buffer, error) {
The event format can be found in the `civisibility_tslv.go` file in the ciVisibilityEvent documentation
*/
- // Create a buffer to read the current payload
- payloadBuf := new(bytes.Buffer)
- if _, err := payloadBuf.ReadFrom(p.payload); err != nil {
- return nil, err
- }
-
// Create the metadata map
allMetadata := map[string]string{
"language": "go",
"runtime-id": globalconfig.RuntimeID(),
"library_version": version.Tag,
}
- if config.env != "" {
- allMetadata["env"] = config.env
+ if env != "" {
+ allMetadata["env"] = env
}
// Create the visibility payload
- visibilityPayload := ciTestCyclePayload{
+ visibilityPayload := &ciTestCyclePayload{
Version: 1,
Metadata: map[string]map[string]string{
"*": allMetadata,
},
- Events: payloadBuf.Bytes(),
+ Events: events,
}
- // Create a new buffer to encode the visibility payload in MessagePack format
- encodedBuf := new(bytes.Buffer)
- if err := msgp.Encode(encodedBuf, &visibilityPayload); err != nil {
- return nil, err
+ // Check for the test session name and append the tag at the metadata level
+ if testSessionName, ok := utils.GetCITags()[constants.TestSessionName]; ok {
+ testSessionMap := map[string]string{
+ constants.TestSessionName: testSessionName,
+ }
+ visibilityPayload.Metadata["test_session_end"] = testSessionMap
+ visibilityPayload.Metadata["test_module_end"] = testSessionMap
+ visibilityPayload.Metadata["test_suite_end"] = testSessionMap
+ visibilityPayload.Metadata["test"] = testSessionMap
}
- return encodedBuf, nil
+ return visibilityPayload
}
diff --git a/ddtrace/tracer/civisibility_payload_test.go b/ddtrace/tracer/civisibility_payload_test.go
index 4057bb36e7..b1a32f8110 100644
--- a/ddtrace/tracer/civisibility_payload_test.go
+++ b/ddtrace/tracer/civisibility_payload_test.go
@@ -7,6 +7,7 @@ package tracer
import (
"bytes"
+ "encoding/json"
"io"
"strconv"
"strings"
@@ -15,6 +16,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tinylib/msgp/msgp"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/version"
)
func newCiVisibilityEventsList(n int) []*ciVisibilityEvent {
@@ -80,6 +85,50 @@ func TestCiVisibilityPayloadDecode(t *testing.T) {
}
}
+func TestCiVisibilityPayloadEnvelope(t *testing.T) {
+ assert := assert.New(t)
+ p := newCiVisibilityPayload()
+ payload := p.writeEnvelope("none", []byte{})
+
+ // Encode the payload to message pack
+ encodedBuf := new(bytes.Buffer)
+ err := msgp.Encode(encodedBuf, payload)
+ assert.NoError(err)
+
+ // Convert the message pack to json
+ jsonBuf := new(bytes.Buffer)
+ _, err = msgp.CopyToJSON(jsonBuf, encodedBuf)
+ assert.NoError(err)
+
+ // Decode the json payload
+ var testCyclePayload ciTestCyclePayload
+ err = json.Unmarshal(jsonBuf.Bytes(), &testCyclePayload)
+ assert.NoError(err)
+
+ // Now let's assert the decoded envelope metadata
+ assert.Contains(testCyclePayload.Metadata, "*")
+ assert.Subset(testCyclePayload.Metadata["*"], map[string]string{
+ "language": "go",
+ "runtime-id": globalconfig.RuntimeID(),
+ "library_version": version.Tag,
+ })
+
+ testSessionName := utils.GetCITags()[constants.TestSessionName]
+ testSessionMap := map[string]string{constants.TestSessionName: testSessionName}
+
+ assert.Contains(testCyclePayload.Metadata, "test_session_end")
+ assert.Subset(testCyclePayload.Metadata["test_session_end"], testSessionMap)
+
+ assert.Contains(testCyclePayload.Metadata, "test_module_end")
+ assert.Subset(testCyclePayload.Metadata["test_module_end"], testSessionMap)
+
+ assert.Contains(testCyclePayload.Metadata, "test_suite_end")
+ assert.Subset(testCyclePayload.Metadata["test_suite_end"], testSessionMap)
+
+ assert.Contains(testCyclePayload.Metadata, "test")
+ assert.Subset(testCyclePayload.Metadata["test"], testSessionMap)
+}
+
func BenchmarkCiVisibilityPayloadThroughput(b *testing.B) {
b.Run("10K", benchmarkCiVisibilityPayloadThroughput(1))
b.Run("100K", benchmarkCiVisibilityPayloadThroughput(10))
diff --git a/ddtrace/tracer/civisibility_transport.go b/ddtrace/tracer/civisibility_transport.go
index db64b5d73d..0731332022 100644
--- a/ddtrace/tracer/civisibility_transport.go
+++ b/ddtrace/tracer/civisibility_transport.go
@@ -105,6 +105,8 @@ func newCiVisibilityTransport(config *config) *ciVisibilityTransport {
testCycleURL = fmt.Sprintf("%s/%s/%s", config.agentURL.String(), EvpProxyPath, TestCyclePath)
}
+ log.Debug("ciVisibilityTransport: creating transport instance [agentless: %v, testcycleurl: %v]", agentlessEnabled, testCycleURL)
+
return &ciVisibilityTransport{
config: config,
testCycleURLPath: testCycleURL,
@@ -157,6 +159,7 @@ func (t *ciVisibilityTransport) send(p *payload) (body io.ReadCloser, err error)
req.Header.Set("Content-Encoding", "gzip")
}
+ log.Debug("ciVisibilityTransport: sending transport request: %v bytes", buffer.Len())
response, err := t.config.httpClient.Do(req)
if err != nil {
return nil, err
diff --git a/ddtrace/tracer/civisibility_tslv.go b/ddtrace/tracer/civisibility_tslv.go
index 377f6d5656..ef6614d48f 100644
--- a/ddtrace/tracer/civisibility_tslv.go
+++ b/ddtrace/tracer/civisibility_tslv.go
@@ -273,6 +273,7 @@ func getCiVisibilityEvent(span *span) *ciVisibilityEvent {
// A pointer to the created ciVisibilityEvent.
func createTestEventFromSpan(span *span) *ciVisibilityEvent {
tSpan := createTslvSpan(span)
+ tSpan.ParentID = 0
tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag)
tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag)
tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag)
@@ -298,6 +299,7 @@ func createTestEventFromSpan(span *span) *ciVisibilityEvent {
// A pointer to the created ciVisibilityEvent.
func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent {
tSpan := createTslvSpan(span)
+ tSpan.ParentID = 0
tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag)
tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag)
tSpan.SuiteID = getAndRemoveMetaToUInt64(span, constants.TestSuiteIDTag)
@@ -320,6 +322,7 @@ func createTestSuiteEventFromSpan(span *span) *ciVisibilityEvent {
// A pointer to the created ciVisibilityEvent.
func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent {
tSpan := createTslvSpan(span)
+ tSpan.ParentID = 0
tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag)
tSpan.ModuleID = getAndRemoveMetaToUInt64(span, constants.TestModuleIDTag)
return &ciVisibilityEvent{
@@ -341,6 +344,7 @@ func createTestModuleEventFromSpan(span *span) *ciVisibilityEvent {
// A pointer to the created ciVisibilityEvent.
func createTestSessionEventFromSpan(span *span) *ciVisibilityEvent {
tSpan := createTslvSpan(span)
+ tSpan.ParentID = 0
tSpan.SessionID = getAndRemoveMetaToUInt64(span, constants.TestSessionIDTag)
return &ciVisibilityEvent{
span: span,
diff --git a/ddtrace/tracer/civisibility_writer.go b/ddtrace/tracer/civisibility_writer.go
index 1582b200a8..969b5edea6 100644
--- a/ddtrace/tracer/civisibility_writer.go
+++ b/ddtrace/tracer/civisibility_writer.go
@@ -45,6 +45,7 @@ type ciVisibilityTraceWriter struct {
//
// A pointer to an initialized ciVisibilityTraceWriter.
func newCiVisibilityTraceWriter(c *config) *ciVisibilityTraceWriter {
+ log.Debug("ciVisibilityTraceWriter: creating trace writer instance")
return &ciVisibilityTraceWriter{
config: c,
payload: newCiVisibilityPayload(),
@@ -62,7 +63,7 @@ func (w *ciVisibilityTraceWriter) add(trace []*span) {
for _, s := range trace {
cvEvent := getCiVisibilityEvent(s)
if err := w.payload.push(cvEvent); err != nil {
- log.Error("Error encoding msgpack: %v", err)
+ log.Error("ciVisibilityTraceWriter: Error encoding msgpack: %v", err)
}
if w.payload.size() > agentlessPayloadSizeLimit {
w.flush()
@@ -104,16 +105,16 @@ func (w *ciVisibilityTraceWriter) flush() {
var err error
for attempt := 0; attempt <= w.config.sendRetries; attempt++ {
size, count = p.size(), p.itemCount()
- log.Debug("Sending payload: size: %d events: %d\n", size, count)
+ log.Debug("ciVisibilityTraceWriter: sending payload: size: %d events: %d\n", size, count)
_, err = w.config.transport.send(p.payload)
if err == nil {
- log.Debug("sent events after %d attempts", attempt+1)
+ log.Debug("ciVisibilityTraceWriter: sent events after %d attempts", attempt+1)
return
}
- log.Error("failure sending events (attempt %d), will retry: %v", attempt+1, err)
+ log.Error("ciVisibilityTraceWriter: failure sending events (attempt %d), will retry: %v", attempt+1, err)
p.reset()
time.Sleep(time.Millisecond)
}
- log.Error("lost %d events: %v", count, err)
+ log.Error("ciVisibilityTraceWriter: lost %d events: %v", count, err)
}(oldp)
}
diff --git a/ddtrace/tracer/log.go b/ddtrace/tracer/log.go
index b1351edb34..b88d477c97 100644
--- a/ddtrace/tracer/log.go
+++ b/ddtrace/tracer/log.go
@@ -92,7 +92,18 @@ func logStartup(t *tracer) {
featureFlags = append(featureFlags, f)
}
- cp, _ := t.config.propagator.(*chainedPropagator)
+ var injectorNames, extractorNames string
+ switch v := t.config.propagator.(type) {
+ case *chainedPropagator:
+ injectorNames = v.injectorNames
+ extractorNames = v.extractorsNames
+ case nil:
+ injectorNames = ""
+ extractorNames = ""
+ default:
+ injectorNames = "custom"
+ extractorNames = "custom"
+ }
info := startupInfo{
Date: time.Now().Format(time.RFC3339),
@@ -127,8 +138,8 @@ func logStartup(t *tracer) {
PartialFlushMinSpans: t.config.partialFlushMinSpans,
Orchestrion: t.config.orchestrionCfg,
FeatureFlags: featureFlags,
- PropagationStyleInject: cp.injectorNames,
- PropagationStyleExtract: cp.extractorsNames,
+ PropagationStyleInject: injectorNames,
+ PropagationStyleExtract: extractorNames,
}
if _, _, err := samplingRulesFromEnv(); err != nil {
info.SamplingRulesError = fmt.Sprintf("%s", err)
diff --git a/ddtrace/tracer/log_test.go b/ddtrace/tracer/log_test.go
index e49e6c56f5..fdeb3c72a5 100644
--- a/ddtrace/tracer/log_test.go
+++ b/ddtrace/tracer/log_test.go
@@ -10,6 +10,7 @@ import (
"math"
"testing"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
@@ -188,3 +189,74 @@ func TestLogFormat(t *testing.T) {
assert.Len(tp.Logs(), 1)
assert.Regexp(logPrefixRegexp+` DEBUG: Started Span: dd.trace_id="12345" dd.span_id="12345" dd.parent_id="0", Operation: test, Resource: /, Tags: map.*, map.*`, tp.Logs()[0])
}
+
+func TestLogPropagators(t *testing.T) {
+ t.Run("default", func(t *testing.T) {
+ assert := assert.New(t)
+ substring := `"propagation_style_inject":"datadog,tracecontext","propagation_style_extract":"datadog,tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("datadog,tracecontext", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "datadog,tracecontext")
+ substring := `"propagation_style_inject":"datadog,tracecontext","propagation_style_extract":"datadog,tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("b3multi", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "b3multi")
+ substring := `"propagation_style_inject":"b3multi","propagation_style_extract":"b3multi"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("none", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE", "none")
+ substring := `"propagation_style_inject":"","propagation_style_extract":""`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("different-injector-extractor", func(t *testing.T) {
+ assert := assert.New(t)
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE_INJECT", "b3multi")
+ t.Setenv("DD_TRACE_PROPAGATION_STYLE_EXTRACT", "tracecontext")
+ substring := `"propagation_style_inject":"b3multi","propagation_style_extract":"tracecontext"`
+ log := setup(t, nil)
+ assert.Regexp(substring, log)
+ })
+ t.Run("custom-propagator", func(t *testing.T) {
+ assert := assert.New(t)
+ substring := `"propagation_style_inject":"custom","propagation_style_extract":"custom"`
+ p := &prop{}
+ log := setup(t, p)
+ assert.Regexp(substring, log)
+ })
+}
+
+type prop struct{}
+
+func (p *prop) Inject(context ddtrace.SpanContext, carrier interface{}) (e error) {
+ return
+}
+func (p *prop) Extract(carrier interface{}) (sctx ddtrace.SpanContext, e error) {
+ return
+}
+
+func setup(t *testing.T, customProp Propagator) string {
+ tp := new(log.RecordLogger)
+ var tracer *tracer
+ var stop func()
+ if customProp != nil {
+ tracer, _, _, stop = startTestTracer(t, WithLogger(tp), WithPropagator(customProp))
+ } else {
+ tracer, _, _, stop = startTestTracer(t, WithLogger(tp))
+ }
+ defer stop()
+ tp.Reset()
+ tp.Ignore("appsec: ", telemetry.LogPrefix)
+ logStartup(tracer)
+ require.Len(t, tp.Logs(), 2)
+ return tp.Logs()[1]
+}
diff --git a/ddtrace/tracer/option.go b/ddtrace/tracer/option.go
index 817dd29b41..ab3e502723 100644
--- a/ddtrace/tracer/option.go
+++ b/ddtrace/tracer/option.go
@@ -279,6 +279,9 @@ type config struct {
// ciVisibilityEnabled controls if the tracer is loaded with CI Visibility mode. default false
ciVisibilityEnabled bool
+
+ // logDirectory is directory for tracer logs specified by user-setting DD_TRACE_LOG_DIRECTORY. default empty/unused
+ logDirectory string
}
// orchestrionConfig contains Orchestrion configuration.
@@ -383,6 +386,7 @@ func newConfig(opts ...StartOption) *config {
c.runtimeMetrics = internal.BoolVal(getDDorOtelConfig("metrics"), false)
c.runtimeMetricsV2 = internal.BoolVal("DD_RUNTIME_METRICS_V2_ENABLED", false)
c.debug = internal.BoolVal(getDDorOtelConfig("debugMode"), false)
+ c.logDirectory = os.Getenv("DD_TRACE_LOG_DIRECTORY")
c.enabled = newDynamicConfig("tracing_enabled", internal.BoolVal(getDDorOtelConfig("enabled"), true), func(b bool) bool { return true }, equal[bool])
if _, ok := os.LookupEnv("DD_TRACE_ENABLED"); ok {
c.enabled.cfgOrigin = telemetry.OriginEnvVar
@@ -509,7 +513,6 @@ func newConfig(opts ...StartOption) *config {
if c.debug {
log.SetLevel(log.LevelDebug)
}
-
// if using stdout or traces are disabled, agent is disabled
agentDisabled := c.logToStdout || !c.enabled.current
c.agent = loadAgentFeatures(agentDisabled, c.agentURL, c.httpClient)
@@ -1294,6 +1297,10 @@ func setHeaderTags(headerAsTags []string) bool {
globalconfig.ClearHeaderTags()
for _, h := range headerAsTags {
header, tag := normalizer.HeaderTag(h)
+ if len(header) == 0 || len(tag) == 0 {
+ log.Debug("Header-tag input is in unsupported format; dropping input value %v", h)
+ continue
+ }
globalconfig.SetHeaderTag(header, tag)
}
return true
diff --git a/ddtrace/tracer/option_test.go b/ddtrace/tracer/option_test.go
index d4259e9f5c..34ebc0a846 100644
--- a/ddtrace/tracer/option_test.go
+++ b/ddtrace/tracer/option_test.go
@@ -318,9 +318,7 @@ type contribPkg struct {
func TestIntegrationEnabled(t *testing.T) {
body, err := exec.Command("go", "list", "-json", "../../contrib/...").Output()
- if err != nil {
- t.Fatalf(err.Error())
- }
+ require.NoError(t, err, "go list command failed")
var packages []contribPkg
stream := json.NewDecoder(strings.NewReader(string(body)))
for stream.More() {
@@ -337,9 +335,7 @@ func TestIntegrationEnabled(t *testing.T) {
}
p := strings.Replace(pkg.Dir, pkg.Root, "../..", 1)
body, err := exec.Command("grep", "-rl", "MarkIntegrationImported", p).Output()
- if err != nil {
- t.Fatalf(err.Error())
- }
+ require.NoError(t, err, "grep command failed")
assert.NotEqual(t, len(body), 0, "expected %s to call MarkIntegrationImported", pkg.Name)
}
}
@@ -1392,6 +1388,28 @@ func TestWithHeaderTags(t *testing.T) {
assert.Equal(ext.HTTPRequestHeaders+".2_h_e_a_d_e_r", globalconfig.HeaderTag("2.h.e.a.d.e.r"))
})
+ t.Run("envvar-invalid", func(t *testing.T) {
+ defer globalconfig.ClearHeaderTags()
+ t.Setenv("DD_TRACE_HEADER_TAGS", "header1:")
+
+ assert := assert.New(t)
+ newConfig()
+
+ assert.Equal(0, globalconfig.HeaderTagsLen())
+ })
+
+ t.Run("envvar-partially-invalid", func(t *testing.T) {
+ defer globalconfig.ClearHeaderTags()
+ t.Setenv("DD_TRACE_HEADER_TAGS", "header1,header2:")
+
+ assert := assert.New(t)
+ newConfig()
+
+ assert.Equal(1, globalconfig.HeaderTagsLen())
+ fmt.Println(globalconfig.HeaderTagMap())
+ assert.Equal(ext.HTTPRequestHeaders+".header1", globalconfig.HeaderTag("Header1"))
+ })
+
t.Run("env-override", func(t *testing.T) {
defer globalconfig.ClearHeaderTags()
assert := assert.New(t)
diff --git a/ddtrace/tracer/rand.go b/ddtrace/tracer/rand.go
index 8eb04c0fed..192a6725f9 100644
--- a/ddtrace/tracer/rand.go
+++ b/ddtrace/tracer/rand.go
@@ -3,68 +3,17 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
-//go:build !go1.22
-
-// TODO(knusbaum): This file should be deleted once go1.21 falls out of support
package tracer
import (
- cryptorand "crypto/rand"
"math"
- "math/big"
- "math/rand"
- "sync"
- "time"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "math/rand/v2"
)
-// random holds a thread-safe source of random numbers.
-var random *rand.Rand
-
-func init() {
- var seed int64
- n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64))
- if err == nil {
- seed = n.Int64()
- } else {
- log.Warn("cannot generate random seed: %v; using current time", err)
- seed = time.Now().UnixNano()
- }
- random = rand.New(&safeSource{
- source: rand.NewSource(seed),
- })
-}
-
-// safeSource holds a thread-safe implementation of rand.Source64.
-type safeSource struct {
- source rand.Source
- sync.Mutex
-}
-
-func (rs *safeSource) Int63() int64 {
- rs.Lock()
- n := rs.source.Int63()
- rs.Unlock()
-
- return n
-}
-
-func (rs *safeSource) Uint64() uint64 { return uint64(rs.Int63()) }
-
-func (rs *safeSource) Seed(seed int64) {
- rs.Lock()
- rs.source.Seed(seed)
- rs.Unlock()
+func randUint64() uint64 {
+ return rand.Uint64()
}
-// generateSpanID returns a random uint64 that has been XORd with the startTime.
-// This is done to get around the 32-bit random seed limitation that may create collisions if there is a large number
-// of go services all generating spans.
func generateSpanID(startTime int64) uint64 {
- return random.Uint64() ^ uint64(startTime)
-}
-
-func randUint64() uint64 {
- return random.Uint64()
+ return rand.Uint64() & math.MaxInt64
}
diff --git a/ddtrace/tracer/rand_go1_22.go b/ddtrace/tracer/rand_go1_22.go
deleted file mode 100644
index 9e7948e47e..0000000000
--- a/ddtrace/tracer/rand_go1_22.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-//go:build go1.22
-
-package tracer
-
-import (
- "math"
- "math/rand/v2"
-)
-
-func randUint64() uint64 {
- return rand.Uint64()
-}
-
-func generateSpanID(startTime int64) uint64 {
- return rand.Uint64() & math.MaxInt64
-}
diff --git a/ddtrace/tracer/rules_sampler.go b/ddtrace/tracer/rules_sampler.go
index 2cd911e3f7..037e393642 100644
--- a/ddtrace/tracer/rules_sampler.go
+++ b/ddtrace/tracer/rules_sampler.go
@@ -19,6 +19,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
"golang.org/x/time/rate"
)
@@ -533,6 +534,7 @@ const defaultRateLimit = 100.0
// The limit is DD_TRACE_RATE_LIMIT if set, `defaultRateLimit` otherwise.
func newRateLimiter() *rateLimiter {
limit := defaultRateLimit
+ origin := telemetry.OriginDefault
v := os.Getenv("DD_TRACE_RATE_LIMIT")
if v != "" {
l, err := strconv.ParseFloat(v, 64)
@@ -542,9 +544,11 @@ func newRateLimiter() *rateLimiter {
log.Warn("DD_TRACE_RATE_LIMIT negative, using default value %f", limit)
} else {
// override the default limit
+ origin = telemetry.OriginEnvVar
limit = l
}
}
+ reportTelemetryOnAppStarted(telemetry.Configuration{Name: "trace_rate_limit", Value: limit, Origin: origin})
return &rateLimiter{
limiter: rate.NewLimiter(rate.Limit(limit), int(math.Ceil(limit))),
prevTime: time.Now(),
diff --git a/ddtrace/tracer/spancontext_test.go b/ddtrace/tracer/spancontext_test.go
index f1cc69d787..6799fee022 100644
--- a/ddtrace/tracer/spancontext_test.go
+++ b/ddtrace/tracer/spancontext_test.go
@@ -148,6 +148,25 @@ func TestSpanTracePushOne(t *testing.T) {
assert.Equal(0, len(trace.spans), "no more spans in the trace")
}
+// Tests to confirm that when the payload queue is full, chunks are dropped
+// and the associated trace is counted as dropped.
+func TestTraceFinishChunk(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newUnstartedTracer()
+ defer tracer.statsd.Close()
+
+ root := newSpan("name", "service", "resource", 0, 0, 0)
+ trace := root.context.trace
+
+ for i := 0; i < payloadQueueSize+1; i++ {
+ trace.mu.Lock()
+ c := chunk{spans: make([]*span, 1)}
+ trace.finishChunk(tracer, &c)
+ trace.mu.Unlock()
+ }
+ assert.Equal(uint32(1), tracer.totalTracesDropped)
+}
+
func TestPartialFlush(t *testing.T) {
t.Setenv("DD_TRACE_PARTIAL_FLUSH_ENABLED", "true")
t.Setenv("DD_TRACE_PARTIAL_FLUSH_MIN_SPANS", "2")
diff --git a/ddtrace/tracer/stats_payload_msgp.go b/ddtrace/tracer/stats_payload_msgp.go
index 7d15d036e7..70a3a0b4c4 100644
--- a/ddtrace/tracer/stats_payload_msgp.go
+++ b/ddtrace/tracer/stats_payload_msgp.go
@@ -2,12 +2,9 @@
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.
-
package tracer
-// NOTE: THIS FILE WAS PRODUCED BY THE
-// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
-// DO NOT EDIT
+// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
@@ -20,83 +17,105 @@ func (z *groupedStats) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
+ err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
+ err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Service":
z.Service, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Service")
return
}
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Name")
return
}
case "Resource":
z.Resource, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Resource")
return
}
case "HTTPStatusCode":
z.HTTPStatusCode, err = dc.ReadUint32()
if err != nil {
+ err = msgp.WrapError(err, "HTTPStatusCode")
return
}
case "Type":
z.Type, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Type")
return
}
case "DBType":
z.DBType, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "DBType")
return
}
case "Hits":
z.Hits, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "Hits")
return
}
case "Errors":
z.Errors, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "Errors")
return
}
case "Duration":
z.Duration, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "Duration")
return
}
case "OkSummary":
z.OkSummary, err = dc.ReadBytes(z.OkSummary)
if err != nil {
+ err = msgp.WrapError(err, "OkSummary")
return
}
case "ErrorSummary":
z.ErrorSummary, err = dc.ReadBytes(z.ErrorSummary)
if err != nil {
+ err = msgp.WrapError(err, "ErrorSummary")
return
}
case "Synthetics":
z.Synthetics, err = dc.ReadBool()
if err != nil {
+ err = msgp.WrapError(err, "Synthetics")
return
}
case "TopLevelHits":
z.TopLevelHits, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "TopLevelHits")
+ return
+ }
+ case "IsTraceRoot":
+ z.IsTraceRoot, err = dc.ReadInt32()
+ if err != nil {
+ err = msgp.WrapError(err, "IsTraceRoot")
return
}
default:
err = dc.Skip()
if err != nil {
+ err = msgp.WrapError(err)
return
}
}
@@ -106,14 +125,15 @@ func (z *groupedStats) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
- // map header, size 13
+ // map header, size 14
// write "Service"
- err = en.Append(0x8d, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+ err = en.Append(0x8e, 0xa7, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Service)
if err != nil {
+ err = msgp.WrapError(err, "Service")
return
}
// write "Name"
@@ -123,6 +143,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Name)
if err != nil {
+ err = msgp.WrapError(err, "Name")
return
}
// write "Resource"
@@ -132,6 +153,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Resource)
if err != nil {
+ err = msgp.WrapError(err, "Resource")
return
}
// write "HTTPStatusCode"
@@ -141,6 +163,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint32(z.HTTPStatusCode)
if err != nil {
+ err = msgp.WrapError(err, "HTTPStatusCode")
return
}
// write "Type"
@@ -150,6 +173,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Type)
if err != nil {
+ err = msgp.WrapError(err, "Type")
return
}
// write "DBType"
@@ -159,6 +183,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.DBType)
if err != nil {
+ err = msgp.WrapError(err, "DBType")
return
}
// write "Hits"
@@ -168,6 +193,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.Hits)
if err != nil {
+ err = msgp.WrapError(err, "Hits")
return
}
// write "Errors"
@@ -177,6 +203,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.Errors)
if err != nil {
+ err = msgp.WrapError(err, "Errors")
return
}
// write "Duration"
@@ -186,6 +213,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.Duration)
if err != nil {
+ err = msgp.WrapError(err, "Duration")
return
}
// write "OkSummary"
@@ -195,6 +223,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteBytes(z.OkSummary)
if err != nil {
+ err = msgp.WrapError(err, "OkSummary")
return
}
// write "ErrorSummary"
@@ -204,6 +233,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteBytes(z.ErrorSummary)
if err != nil {
+ err = msgp.WrapError(err, "ErrorSummary")
return
}
// write "Synthetics"
@@ -213,6 +243,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteBool(z.Synthetics)
if err != nil {
+ err = msgp.WrapError(err, "Synthetics")
return
}
// write "TopLevelHits"
@@ -222,6 +253,17 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.TopLevelHits)
if err != nil {
+ err = msgp.WrapError(err, "TopLevelHits")
+ return
+ }
+ // write "IsTraceRoot"
+ err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x6f, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt32(z.IsTraceRoot)
+ if err != nil {
+ err = msgp.WrapError(err, "IsTraceRoot")
return
}
return
@@ -229,7 +271,7 @@ func (z *groupedStats) EncodeMsg(en *msgp.Writer) (err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *groupedStats) Msgsize() (s int) {
- s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size
+ s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 15 + msgp.Uint32Size + 5 + msgp.StringPrefixSize + len(z.Type) + 7 + msgp.StringPrefixSize + len(z.DBType) + 5 + msgp.Uint64Size + 7 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.BytesPrefixSize + len(z.OkSummary) + 13 + msgp.BytesPrefixSize + len(z.ErrorSummary) + 11 + msgp.BoolSize + 13 + msgp.Uint64Size + 12 + msgp.Int32Size
return
}
@@ -240,29 +282,34 @@ func (z *statsBucket) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
+ err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
+ err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Start":
z.Start, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "Start")
return
}
case "Duration":
z.Duration, err = dc.ReadUint64()
if err != nil {
+ err = msgp.WrapError(err, "Duration")
return
}
case "Stats":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
+ err = msgp.WrapError(err, "Stats")
return
}
if cap(z.Stats) >= int(zb0002) {
@@ -273,12 +320,14 @@ func (z *statsBucket) DecodeMsg(dc *msgp.Reader) (err error) {
for za0001 := range z.Stats {
err = z.Stats[za0001].DecodeMsg(dc)
if err != nil {
+ err = msgp.WrapError(err, "Stats", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
+ err = msgp.WrapError(err)
return
}
}
@@ -296,6 +345,7 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.Start)
if err != nil {
+ err = msgp.WrapError(err, "Start")
return
}
// write "Duration"
@@ -305,6 +355,7 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteUint64(z.Duration)
if err != nil {
+ err = msgp.WrapError(err, "Duration")
return
}
// write "Stats"
@@ -314,11 +365,13 @@ func (z *statsBucket) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteArrayHeader(uint32(len(z.Stats)))
if err != nil {
+ err = msgp.WrapError(err, "Stats")
return
}
for za0001 := range z.Stats {
err = z.Stats[za0001].EncodeMsg(en)
if err != nil {
+ err = msgp.WrapError(err, "Stats", za0001)
return
}
}
@@ -341,34 +394,40 @@ func (z *statsPayload) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
+ err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
+ err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Hostname":
z.Hostname, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Hostname")
return
}
case "Env":
z.Env, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Env")
return
}
case "Version":
z.Version, err = dc.ReadString()
if err != nil {
+ err = msgp.WrapError(err, "Version")
return
}
case "Stats":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
+ err = msgp.WrapError(err, "Stats")
return
}
if cap(z.Stats) >= int(zb0002) {
@@ -379,12 +438,14 @@ func (z *statsPayload) DecodeMsg(dc *msgp.Reader) (err error) {
for za0001 := range z.Stats {
err = z.Stats[za0001].DecodeMsg(dc)
if err != nil {
+ err = msgp.WrapError(err, "Stats", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
+ err = msgp.WrapError(err)
return
}
}
@@ -402,6 +463,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Hostname)
if err != nil {
+ err = msgp.WrapError(err, "Hostname")
return
}
// write "Env"
@@ -411,6 +473,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Env)
if err != nil {
+ err = msgp.WrapError(err, "Env")
return
}
// write "Version"
@@ -420,6 +483,7 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteString(z.Version)
if err != nil {
+ err = msgp.WrapError(err, "Version")
return
}
// write "Stats"
@@ -429,11 +493,13 @@ func (z *statsPayload) EncodeMsg(en *msgp.Writer) (err error) {
}
err = en.WriteArrayHeader(uint32(len(z.Stats)))
if err != nil {
+ err = msgp.WrapError(err, "Stats")
return
}
for za0001 := range z.Stats {
err = z.Stats[za0001].EncodeMsg(en)
if err != nil {
+ err = msgp.WrapError(err, "Stats", za0001)
return
}
}
diff --git a/ddtrace/tracer/telemetry.go b/ddtrace/tracer/telemetry.go
index 45ed0fda84..df0d598aee 100644
--- a/ddtrace/tracer/telemetry.go
+++ b/ddtrace/tracer/telemetry.go
@@ -12,6 +12,12 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/internal/telemetry"
)
+var additionalConfigs []telemetry.Configuration
+
+func reportTelemetryOnAppStarted(c telemetry.Configuration) {
+ additionalConfigs = append(additionalConfigs, c)
+}
+
// startTelemetry starts the global instrumentation telemetry client with tracer data
// unless instrumentation telemetry is disabled via the DD_INSTRUMENTATION_TELEMETRY_ENABLED
// env var.
@@ -44,7 +50,8 @@ func startTelemetry(c *config) {
{Name: "service", Value: c.serviceName},
{Name: "universal_version", Value: c.universalVersion},
{Name: "env", Value: c.env},
- {Name: "agent_url", Value: c.agentURL.String()},
+ {Name: "version", Value: c.version},
+ {Name: "trace_agent_url", Value: c.agentURL.String()},
{Name: "agent_hostname", Value: c.hostname},
{Name: "runtime_metrics_enabled", Value: c.runtimeMetrics},
{Name: "runtime_metrics_v2_enabled", Value: c.runtimeMetricsV2},
@@ -56,6 +63,7 @@ func startTelemetry(c *config) {
{Name: "trace_peer_service_defaults_enabled", Value: c.peerServiceDefaultsEnabled},
{Name: "orchestrion_enabled", Value: c.orchestrionCfg.Enabled},
{Name: "trace_enabled", Value: c.enabled.current, Origin: c.enabled.cfgOrigin},
+ {Name: "trace_log_directory", Value: c.logDirectory},
c.traceSampleRate.toTelemetry(),
c.headerAsTags.toTelemetry(),
c.globalTags.toTelemetry(),
@@ -103,5 +111,6 @@ func startTelemetry(c *config) {
telemetryConfigs = append(telemetryConfigs, telemetry.Configuration{Name: "orchestrion_" + k, Value: v})
}
}
+ telemetryConfigs = append(telemetryConfigs, additionalConfigs...)
telemetry.GlobalClient.ProductChange(telemetry.NamespaceTracers, true, telemetryConfigs)
}
diff --git a/ddtrace/tracer/tracer.go b/ddtrace/tracer/tracer.go
index 4a59a2d4d1..c2cbbf9a50 100644
--- a/ddtrace/tracer/tracer.go
+++ b/ddtrace/tracer/tracer.go
@@ -83,6 +83,11 @@ type tracer struct {
// finished, and dropped
spansStarted, spansFinished, tracesDropped uint32
+ // Keeps track of the total number of traces dropped for accurate logging.
+ totalTracesDropped uint32
+
+ logDroppedTraces *time.Ticker
+
// Records the number of dropped P0 traces and spans.
droppedP0Traces, droppedP0Spans uint32
@@ -108,6 +113,11 @@ type tracer struct {
// abandonedSpansDebugger specifies where and how potentially abandoned spans are stored
// when abandoned spans debugging is enabled.
abandonedSpansDebugger *abandonedSpansDebugger
+
+ // logFile contains a pointer to the file for writing tracer logs along with helper functionality for closing the file
+ // logFile is closed when tracer stops
+ // by default, tracer logs to stderr and this setting is unused
+ logFile *log.ManagedFile
}
const (
@@ -269,6 +279,14 @@ func newUnstartedTracer(opts ...StartOption) *tracer {
if c.dataStreamsMonitoringEnabled {
dataStreamsProcessor = datastreams.NewProcessor(statsd, c.env, c.serviceName, c.version, c.agentURL, c.httpClient)
}
+ var logFile *log.ManagedFile
+ if v := c.logDirectory; v != "" {
+ logFile, err = log.OpenFileAtPath(v)
+ if err != nil {
+ log.Warn("%v", err)
+ c.logDirectory = ""
+ }
+ }
t := &tracer{
config: c,
traceWriter: writer,
@@ -278,6 +296,7 @@ func newUnstartedTracer(opts ...StartOption) *tracer {
rulesSampling: rulesSampler,
prioritySampling: sampler,
pid: os.Getpid(),
+ logDroppedTraces: time.NewTicker(1 * time.Second),
stats: newConcentrator(c, defaultStatsBucketSize),
obfuscator: obfuscate.NewObfuscator(obfuscate.Config{
SQL: obfuscate.SQLConfig{
@@ -290,6 +309,7 @@ func newUnstartedTracer(opts ...StartOption) *tracer {
}),
statsd: statsd,
dataStreams: dataStreamsProcessor,
+ logFile: logFile,
}
return t
}
@@ -461,7 +481,15 @@ func (t *tracer) pushChunk(trace *chunk) {
select {
case t.out <- trace:
default:
- log.Error("payload queue full, dropping %d traces", len(trace.spans))
+ log.Debug("payload queue full, trace dropped %d spans", len(trace.spans))
+ atomic.AddUint32(&t.totalTracesDropped, 1)
+ }
+ select {
+ case <-t.logDroppedTraces.C:
+ if t := atomic.SwapUint32(&t.totalTracesDropped, 0); t > 0 {
+ log.Error("%d traces dropped through payload queue", t)
+ }
+ default:
}
}
@@ -675,6 +703,10 @@ func (t *tracer) Stop() {
}
appsec.Stop()
remoteconfig.Stop()
+ // Close log file last to account for any logs from the above calls
+ if t.logFile != nil {
+ t.logFile.Close()
+ }
}
// Inject uses the configured or default TextMap Propagator.
diff --git a/ddtrace/tracer/tracer_test.go b/ddtrace/tracer/tracer_test.go
index decf4407ea..dc88eb1323 100644
--- a/ddtrace/tracer/tracer_test.go
+++ b/ddtrace/tracer/tracer_test.go
@@ -255,6 +255,27 @@ func TestTracerStart(t *testing.T) {
})
}
+func TestTracerLogFile(t *testing.T) {
+ t.Run("valid", func(t *testing.T) {
+ dir, err := os.MkdirTemp("", "example")
+ if err != nil {
+ t.Fatalf("Failure to make temp dir: %v", err)
+ }
+ t.Setenv("DD_TRACE_LOG_DIRECTORY", dir)
+ tracer := newTracer()
+ assert.Equal(t, dir, tracer.config.logDirectory)
+ assert.NotNil(t, tracer.logFile)
+ assert.Equal(t, dir+"/"+log.LoggerFile, tracer.logFile.Name())
+ })
+ t.Run("invalid", func(t *testing.T) {
+ t.Setenv("DD_TRACE_LOG_DIRECTORY", "some/nonexistent/path")
+ tracer := newTracer()
+ defer Stop()
+ assert.Empty(t, tracer.config.logDirectory)
+ assert.Nil(t, tracer.logFile)
+ })
+}
+
func TestTracerStartSpan(t *testing.T) {
t.Run("generic", func(t *testing.T) {
tracer := newTracer()
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 17788504ee..76680be932 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -113,23 +113,24 @@ services:
image: memcached:1.5.9
ports:
- "11211:11211"
- zookeeper:
- image: bitnami/zookeeper:latest
+ kafka:
+ image: confluentinc/confluent-local:7.5.0
environment:
- ALLOW_ANONYMOUS_LOGIN: "yes"
- ports:
- - "2181:2181"
- kafka2:
- image: darccio/kafka:2.13-2.8.1
- environment:
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092
- KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
- #KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT,LISTENER_NAME:PLAINTEXT
- KAFKA_CREATE_TOPICS: gotest:1:1,gosegtest:1:1
- KAFKA_BROKER_ID: 1
- depends_on:
- - zookeeper
+ KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9094"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9093,BROKER://localhost:9092"
+ KAFKA_REST_BOOTSTRAP_SERVERS: "PLAINTEXT://0.0.0.0:9093,BROKER://0.0.0.0:9092"
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@localhost:9094"
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "BROKER"
+ KAFKA_BROKER_ID: "1"
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
+ KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+ KAFKA_NODE_ID: "1"
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
ports:
- "9092:9092"
localstack:
diff --git a/go.mod b/go.mod
index 3f10f68b67..e9afd1d301 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,15 @@
module gopkg.in/DataDog/dd-trace-go.v1
-go 1.21
+go 1.22.0
require (
cloud.google.com/go/pubsub v1.33.0
github.com/99designs/gqlgen v0.17.36
- github.com/DataDog/appsec-internal-go v1.7.0
+ github.com/DataDog/appsec-internal-go v1.8.0
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0
github.com/DataDog/datadog-go/v5 v5.3.0
- github.com/DataDog/go-libddwaf/v3 v3.3.0
+ github.com/DataDog/go-libddwaf/v3 v3.4.0
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5
github.com/DataDog/gostackparse v0.7.0
github.com/DataDog/sketches-go v1.4.5
@@ -40,7 +40,6 @@ require (
github.com/emicklei/go-restful/v3 v3.11.0
github.com/garyburd/redigo v1.6.4
github.com/gin-gonic/gin v1.9.1
- github.com/glebarez/go-sqlite v1.22.0
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8
github.com/go-chi/chi v1.5.4
github.com/go-chi/chi/v5 v5.0.10
@@ -83,7 +82,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
github.com/tidwall/buntdb v1.3.0
- github.com/tinylib/msgp v1.1.8
+ github.com/tinylib/msgp v1.2.1
github.com/twitchtv/twirp v8.1.3+incompatible
github.com/uptrace/bun v1.1.17
github.com/uptrace/bun/dialect/sqlitedialect v1.1.17
@@ -95,10 +94,10 @@ require (
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0
go.opentelemetry.io/otel v1.20.0
go.opentelemetry.io/otel/trace v1.20.0
- go.uber.org/atomic v1.11.0
- golang.org/x/mod v0.16.0
+ go.uber.org/goleak v1.3.0
+ golang.org/x/mod v0.18.0
golang.org/x/oauth2 v0.9.0
- golang.org/x/sys v0.20.0
+ golang.org/x/sys v0.23.0
golang.org/x/time v0.3.0
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
google.golang.org/api v0.128.0
@@ -121,7 +120,7 @@ require (
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.2 // indirect
- github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+ github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/andybalholm/brotli v1.0.6 // indirect
@@ -223,7 +222,7 @@ require (
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
- github.com/philhofer/fwd v1.1.2 // indirect
+ github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -257,14 +256,15 @@ require (
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.20.0 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
golang.org/x/arch v0.4.0 // indirect
- golang.org/x/crypto v0.21.0 // indirect
+ golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
- golang.org/x/net v0.23.0 // indirect
- golang.org/x/sync v0.6.0 // indirect
- golang.org/x/term v0.18.0 // indirect
- golang.org/x/text v0.14.0 // indirect
- golang.org/x/tools v0.19.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/term v0.22.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
diff --git a/go.sum b/go.sum
index 2a8d9a4abf..f0c95ecdd6 100644
--- a/go.sum
+++ b/go.sum
@@ -625,21 +625,21 @@ github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9s
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0=
-github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
+github.com/DataDog/appsec-internal-go v1.8.0 h1:1Tfn3LEogntRqZtf88twSApOCAAO3V+NILYhuQIo4J4=
+github.com/DataDog/appsec-internal-go v1.8.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 h1:LplNAmMgZvGU7kKA0+4c1xWOjz828xweW5TCi8Mw9Q0=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
-github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4=
-github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec=
+github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM=
+github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5 h1:2S3vDq1CtlmVMdq0+7TIwYKUSDJmBEsaB9gdnGI52yE=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5/go.mod h1:quaQJ+wPN41xEC458FCpTwyROZm3MzmTZ8q8XOXQiPs=
-github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
-github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
+github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
@@ -1143,8 +1143,6 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
-github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
-github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs=
@@ -1834,8 +1832,8 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
-github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
@@ -2035,8 +2033,8 @@ github.com/tidwall/rtred v0.1.2 h1:exmoQtOLvDoO8ud++6LwVsAMTu0KPzLTUrMln8u1yu8=
github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ=
github.com/tidwall/tinyqueue v0.1.1 h1:SpNEvEggbpyN5DIReaJ2/1ndroY8iyEGxPYxoSaymYE=
github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw=
-github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
-github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
+github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU=
+github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -2226,8 +2224,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2286,8 +2284,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -2373,8 +2371,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2422,8 +2420,8 @@ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2569,8 +2567,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -2585,8 +2583,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2603,8 +2601,9 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2693,10 +2692,9 @@ golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/internal/apps/Dockerfile b/internal/apps/Dockerfile
index 2f32c31648..35993e4c94 100644
--- a/internal/apps/Dockerfile
+++ b/internal/apps/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.21
+FROM golang:1.23
COPY . /dd-trace-go
WORKDIR /dd-trace-go/internal/apps
# -t will download all dependencies, including test dependencies
diff --git a/internal/apps/apps.go b/internal/apps/apps.go
index 4ac7a6e205..580719ea0a 100644
--- a/internal/apps/apps.go
+++ b/internal/apps/apps.go
@@ -25,9 +25,11 @@ type Config struct {
// default we configure non-stop execution tracing for the test apps unless
// a DD_PROFILING_EXECUTION_TRACE_PERIOD env is set or this option is true.
DisableExecutionTracing bool
+
+ httpAddr net.Addr
}
-func (c Config) RunHTTP(handler func() http.Handler) {
+func (c *Config) RunHTTP(handler func() http.Handler) {
// Parse common test app flags
var (
httpF = flag.String("http", "localhost:8080", "HTTP addr to listen on.")
@@ -74,6 +76,7 @@ func (c Config) RunHTTP(handler func() http.Handler) {
log.Fatalf("failed to listen: %s", err)
}
defer l.Close()
+ c.httpAddr = l.Addr()
log.Printf("Listening on: http://%s", *httpF)
// handler is a func, because if we create a traced handler before starting
// the tracer, the service name will default to http.router.
@@ -84,3 +87,7 @@ func (c Config) RunHTTP(handler func() http.Handler) {
<-ctx.Done()
log.Printf("Received interrupt, shutting down")
}
+
+func (c Config) HTTPAddr() net.Addr {
+ return c.httpAddr
+}
diff --git a/internal/apps/gc-overhead/main.go b/internal/apps/gc-overhead/main.go
new file mode 100644
index 0000000000..fd6fc567fe
--- /dev/null
+++ b/internal/apps/gc-overhead/main.go
@@ -0,0 +1,177 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2023 Datadog, Inc.
+
+// gc-overhead implements a http service that demonstrates high GC overhead. The
+// primary use case is to take screenshots of CPU and Memory profiles for blog
+// posts. The code is intentionally inefficient, but should produce plausible
+// FlameGraphs. Loop and data sizes are chosen so that the hotspots in the CPU
+// profile, the Allocated Memory Profile, and the Heap Live Objects profile are
+// different.
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "maps"
+ "math"
+ "math/rand/v2"
+ "net/http"
+ "runtime/debug"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/DataDog/dd-trace-go/internal/apps"
+ httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
+)
+
+func main() {
+ // Initialize fake data
+ initFakeData()
+
+ // Experimentally determined value to keep GC overhead around 30%.
+ debug.SetGCPercent(35)
+
+ // Start app
+ app := apps.Config{}
+ app.RunHTTP(func() http.Handler {
+ mux := httptrace.NewServeMux()
+ mux.HandleFunc("/vehicles/update_location", VehiclesUpdateLocationHandler)
+ mux.HandleFunc("/vehicles/list", VehiclesListHandler)
+ return mux
+ })
+}
+
+func VehiclesUpdateLocationHandler(w http.ResponseWriter, r *http.Request) {
+ load := int(sineLoad() * 2e5)
+ for i := 0; i < load; i++ {
+ u := &VehicleLocationUpdate{}
+ data := fakeData.vehicleLocationUpdates[i%len(fakeData.vehicleLocationUpdates)]
+ if err := parseVehicleLocationUpdate(data, u); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ store.Update(u)
+ }
+ w.Write([]byte("ok"))
+}
+
+func parseVehicleLocationUpdate(data []byte, u *VehicleLocationUpdate) error {
+ return json.Unmarshal(data, u)
+}
+
+func VehiclesListHandler(w http.ResponseWriter, r *http.Request) {
+ w.Write(renderVehiclesList().Bytes())
+}
+
+func renderVehiclesList() *bytes.Buffer {
+ buf := &bytes.Buffer{}
+ list := store.List()
+ load := sineLoad() * float64(len(list))
+ list = list[0:int(load)]
+ for _, v := range list {
+ fmt.Fprintf(buf, "%s: %v\n", v.ID, v.History)
+ }
+ return buf
+}
+
+var fakeData struct {
+ vehicleLocationUpdates [1000][]byte
+}
+
+var store = MemoryStore{}
+
+func initFakeData() {
+ for i := 0; i < len(fakeData.vehicleLocationUpdates); i++ {
+ update := VehicleLocationUpdate{
+ ID: fmt.Sprintf("vehicle-%d", i),
+ Position: Position{
+ Longitude: rand.Float64()*180 - 90,
+ Latitude: rand.Float64()*360 - 180,
+ },
+ }
+ fakeData.vehicleLocationUpdates[i], _ = json.Marshal(update)
+ }
+}
+
+type MemoryStore struct {
+ mu sync.RWMutex
+ vehicles map[string]*Vehicle
+}
+
+func (m *MemoryStore) Update(u *VehicleLocationUpdate) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if m.vehicles == nil {
+ m.vehicles = make(map[string]*Vehicle)
+ }
+
+ vehicle, ok := m.vehicles[u.ID]
+ if !ok {
+ vehicle = NewVehicle(u.ID)
+ m.vehicles[u.ID] = vehicle
+ }
+ vehicle.History = append(vehicle.History, &u.Position)
+ const historyLimit = 2000
+ if len(vehicle.History) > historyLimit {
+ // Keep only the last positions
+ copy(vehicle.History, vehicle.History[len(vehicle.History)-historyLimit:])
+ vehicle.History = vehicle.History[:historyLimit]
+ }
+}
+
+func NewVehicle(id string) *Vehicle {
+ return &Vehicle{ID: id, Data: make([]byte, 1024*1024)}
+}
+
+func (m *MemoryStore) List() (vehicles []*Vehicle) {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+
+ for _, key := range slices.Sorted(maps.Keys(m.vehicles)) {
+ vehicles = append(vehicles, m.vehicles[key].Copy())
+ }
+ return vehicles
+}
+
+type Position struct {
+ Longitude float64
+ Latitude float64
+}
+
+type VehicleLocationUpdate struct {
+ ID string
+ Position Position
+}
+
+type Vehicle struct {
+ ID string
+ History []*Position
+ Data []byte
+}
+
+func (v *Vehicle) Copy() *Vehicle {
+ history := make([]*Position, len(v.History))
+ copy(history, v.History)
+ return &Vehicle{
+ ID: v.ID,
+ History: history,
+ }
+}
+
+// sineLoad returns a value between 0 and 1 that varies sinusoidally over time.
+func sineLoad() float64 {
+ period := 5 * time.Minute
+ // Get the current time in seconds since Unix epoch
+ currentTime := time.Now().UnixNano()
+ // Compute the phase of the sine wave, current time modulo period
+ phase := float64(currentTime) / float64(period) * 2 * math.Pi
+ // Generate the sine wave value (-1 to 1)
+ sineValue := math.Sin(phase)
+ // Normalize the sine wave value to be between 0 and 1
+ return (sineValue + 1) * 0.5
+}
diff --git a/internal/apps/go.mod b/internal/apps/go.mod
index dbc49a99ca..82339b312f 100644
--- a/internal/apps/go.mod
+++ b/internal/apps/go.mod
@@ -1,15 +1,15 @@
module github.com/DataDog/dd-trace-go/internal/apps
-go 1.21
+go 1.23.0
require (
- golang.org/x/sync v0.6.0
+ golang.org/x/sync v0.7.0
gopkg.in/DataDog/dd-trace-go.v1 v1.64.0
)
require (
- github.com/DataDog/appsec-internal-go v1.7.0 // indirect
- github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect
+ github.com/DataDog/appsec-internal-go v1.8.0 // indirect
+ github.com/DataDog/go-libddwaf/v3 v3.4.0 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
@@ -24,16 +24,16 @@ require (
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/mod v0.16.0 // indirect
- golang.org/x/tools v0.19.0 // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
- github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+ github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/gostackparse v0.7.0 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -41,14 +41,14 @@ require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
github.com/google/uuid v1.5.0 // indirect
- github.com/philhofer/fwd v1.1.2 // indirect
+ github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/stretchr/testify v1.9.0
- github.com/tinylib/msgp v1.1.8 // indirect
- golang.org/x/sys v0.20.0 // indirect
+ github.com/tinylib/msgp v1.2.1 // indirect
+ golang.org/x/sys v0.23.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/protobuf v1.33.0 // indirect
diff --git a/internal/apps/go.sum b/internal/apps/go.sum
index 5a8ec755b1..f07665e08c 100644
--- a/internal/apps/go.sum
+++ b/internal/apps/go.sum
@@ -1,17 +1,17 @@
-github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0=
-github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
+github.com/DataDog/appsec-internal-go v1.8.0 h1:1Tfn3LEogntRqZtf88twSApOCAAO3V+NILYhuQIo4J4=
+github.com/DataDog/appsec-internal-go v1.8.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 h1:LplNAmMgZvGU7kKA0+4c1xWOjz828xweW5TCi8Mw9Q0=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4=
github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
-github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4=
-github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec=
+github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM=
+github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5 h1:2S3vDq1CtlmVMdq0+7TIwYKUSDJmBEsaB9gdnGI52yE=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5/go.mod h1:quaQJ+wPN41xEC458FCpTwyROZm3MzmTZ8q8XOXQiPs=
-github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
-github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
+github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
@@ -39,8 +39,6 @@ github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y
github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
-github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -62,6 +60,8 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -82,8 +82,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
-github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
-github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -120,10 +120,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
-github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
+github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU=
+github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -131,26 +130,18 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -159,29 +150,19 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -203,11 +184,23 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
+lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
+modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
+modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
+modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
+modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
+modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/internal/apps/scenario_test.go b/internal/apps/scenario_test.go
index 8c39f1054f..8276a30be5 100644
--- a/internal/apps/scenario_test.go
+++ b/internal/apps/scenario_test.go
@@ -66,6 +66,42 @@ func TestScenario(t *testing.T) {
})
}
})
+
+ t.Run("gc-overhead", func(t *testing.T) {
+ scenarios := []struct {
+ version string
+ endpoints []string
+ }{
+ {"v1", []string{"/vehicles/update_location", "/vehicles/list"}},
+ }
+ for _, s := range scenarios {
+ t.Run(s.version, func(t *testing.T) {
+ lc := newLaunchConfig(t)
+ lc.Version = s.version
+ process := lc.Launch(t)
+ defer process.Stop(t)
+ wc.HitEndpoints(t, process, s.endpoints...)
+ })
+ }
+ })
+
+ t.Run("worker-pool-bottleneck", func(t *testing.T) {
+ scenarios := []struct {
+ version string
+ endpoints []string
+ }{
+ {"v1", []string{"/queue/push"}},
+ }
+ for _, s := range scenarios {
+ t.Run(s.version, func(t *testing.T) {
+ lc := newLaunchConfig(t)
+ lc.Version = s.version
+ process := lc.Launch(t)
+ defer process.Stop(t)
+ wc.HitEndpoints(t, process, s.endpoints...)
+ })
+ }
+ })
}
func newWorkloadConfig(t *testing.T) (wc workloadConfig) {
@@ -152,6 +188,13 @@ func appName(t *testing.T) string {
}
func serviceName(t *testing.T) string {
+ // Allow overriding the service name via env var
+ ddService := os.Getenv("DD_SERVICE")
+ if ddService != "" {
+ return ddService
+ }
+
+ // Otherwise derive the service name from the test name
return "dd-trace-go/" + strings.Join(strings.Split(t.Name(), "/")[1:], "/")
}
diff --git a/internal/apps/worker-pool-bottleneck/main.go b/internal/apps/worker-pool-bottleneck/main.go
new file mode 100644
index 0000000000..0b25b1ba47
--- /dev/null
+++ b/internal/apps/worker-pool-bottleneck/main.go
@@ -0,0 +1,143 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+// worker-pool-bottleneck implements a http service that demonstrates a worker
+// pool bottleneck. In particular the service simulates an application that
+// has a queue processing pipeline that consists of:
+//
+// 1. ConsumeMessageWorker: Pulls messages from a queue.
+// 2. DecodeMessageWorker: Decodes messages.
+// 3. LLMMessageWorker: Makes a long-latency call.
+// 4. PublishMessageWorker: Publishes messages.
+//
+// The LLMMessageWorker is the bottleneck in the pipeline because it doesn't
+// have enough workers to keep up with the other workers. This causes the
+// ConsumeMessageWorker and DecodeMessageWorker to block on send operations.
+//
+// The primary use case is to take screenshots of the timeline feature.
+package main
+
+import (
+ "encoding/json"
+ "io"
+ "log"
+ "math/rand/v2"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/DataDog/dd-trace-go/internal/apps"
+ httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
+)
+
+func main() {
+ // Init queue
+ queue, err := NewQueue()
+ if err != nil {
+ log.Fatalf("failed to create queue: %v", err)
+ }
+
+ // Start app
+ app := apps.Config{}
+ app.RunHTTP(func() http.Handler {
+ // Setup workers
+ consumeDecode := make(chan []byte)
+ decodeLLM := make(chan any)
+ llmPublish := make(chan any)
+ go ConsumeMessageWorker(queue, consumeDecode)
+ for range 4 {
+ go DecodeMessageWorker(consumeDecode, decodeLLM)
+ go LLMMessageWorker(decodeLLM, llmPublish, app.HTTPAddr())
+ go PublishMessageWorker(llmPublish)
+ }
+
+ // Setup HTTP handlers
+ mux := httptrace.NewServeMux()
+ mux.HandleFunc("/queue/push", QueuePushHandler(queue))
+ mux.HandleFunc("/llm", LLMHandler())
+ return mux
+ })
+}
+
+func QueuePushHandler(queue *Queue) http.HandlerFunc {
+ data, _ := fakePayload(16 * 1024)
+ return func(w http.ResponseWriter, r *http.Request) {
+ for i := 0; i < 100; i++ {
+ if err := queue.Push(data); err != nil {
+ log.Fatalf("failed to push message: %v", err)
+ }
+ }
+ }
+}
+
+func LLMHandler() http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // Flush out the headers and a short message
+ w.WriteHeader(http.StatusOK)
+ rc := http.NewResponseController(w)
+ w.Write([]byte("hello\n"))
+ rc.Flush()
+ // Wait to simulate a long time to respond
+ time.Sleep(time.Duration(rand.Float64() * 100 * float64(time.Millisecond)))
+ // Flush out another short message and finish the response
+ w.Write([]byte("world\n"))
+ rc.Flush()
+ }
+}
+
+func fakePayload(elements int) ([]byte, error) {
+ var payload []int
+ for i := 0; i < elements; i++ {
+ payload = append(payload, i)
+ }
+ return json.Marshal(payload)
+}
+
+func ConsumeMessageWorker(queue *Queue, decode chan<- []byte) {
+ for {
+ msg, err := queue.Pull()
+ if err != nil {
+ log.Fatalf("failed to pull message: %v", err)
+ }
+ decode <- msg
+ }
+}
+
+func DecodeMessageWorker(decode <-chan []byte, llm chan<- any) {
+ for {
+ msg := <-decode
+ var data interface{}
+ if err := json.Unmarshal(msg, &data); err != nil {
+ log.Fatalf("failed to decode message: %v: %q", err, string(msg))
+ }
+ llm <- data
+ }
+}
+
+func LLMMessageWorker(llm <-chan any, db chan<- any, addr net.Addr) {
+ for {
+ msg := <-llm
+ llmCall(addr)
+ db <- msg
+ }
+}
+
+func PublishMessageWorker(db <-chan any) {
+ for {
+ <-db
+ }
+}
+
+func llmCall(addr net.Addr) error {
+ res, err := http.Get("http://" + addr.String() + "/llm")
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ // Ensure that llmCall will spend most of its time in a networking state
+ // so it looks purple in the timeline.
+ _, err = io.ReadAll(res.Body)
+ return err
+}
diff --git a/internal/apps/worker-pool-bottleneck/queue.go b/internal/apps/worker-pool-bottleneck/queue.go
new file mode 100644
index 0000000000..c6a2436c2c
--- /dev/null
+++ b/internal/apps/worker-pool-bottleneck/queue.go
@@ -0,0 +1,98 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package main
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "sync"
+)
+
+// Queue pretends to be a networked message queue. In particular it arranges
+// for calls Pull() to be blocked in a stack trace doing a net.Conn.Read().
+type Queue struct {
+ listener net.Listener
+ conn net.Conn
+ pushMutex sync.Mutex
+ pullMutex sync.Mutex
+}
+
+func NewQueue() (q *Queue, err error) {
+ q = &Queue{}
+ q.listener, err = net.Listen("tcp", "localhost:0")
+ if err != nil {
+ return nil, fmt.Errorf("failed to start TCP server: %v", err)
+ }
+
+ go q.echoServer()
+
+ q.conn, err = net.Dial("tcp", q.listener.Addr().String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to dial TCP server: %v", err)
+ }
+
+ return q, nil
+}
+
+func (q *Queue) echoServer() {
+ conn, err := q.listener.Accept()
+ if err != nil {
+ log.Fatalf("failed to accept connection: %v\n", err)
+ return
+ }
+ defer conn.Close()
+
+ if _, err := io.Copy(conn, conn); err != nil {
+ log.Fatalf("failed to copy data: %v\n", err)
+ return
+ }
+}
+
+func (q *Queue) Push(data []byte) error {
+ q.pushMutex.Lock()
+ defer q.pushMutex.Unlock()
+
+ // Send the length of the message first
+ err := binary.Write(q.conn, binary.BigEndian, uint64(len(data)))
+ if err != nil {
+ return fmt.Errorf("failed to send message length: %v", err)
+ }
+
+ // Send the actual message
+ _, err = q.conn.Write(data)
+ if err != nil {
+ return fmt.Errorf("failed to send message: %v", err)
+ }
+ return nil
+}
+
+func (q *Queue) Pull() ([]byte, error) {
+ q.pullMutex.Lock()
+ defer q.pullMutex.Unlock()
+
+ // Read the length of the message first
+ var length uint64
+ err := binary.Read(q.conn, binary.BigEndian, &length)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read message length: %v", err)
+ }
+
+ // Read the actual message
+ data := make([]byte, length)
+ _, err = io.ReadFull(q.conn, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read message: %v", err)
+ }
+ return data, nil
+}
+
+func (q *Queue) Close() {
+ q.listener.Close()
+ q.conn.Close()
+}
diff --git a/internal/appsec/README.md b/internal/appsec/README.md
new file mode 100644
index 0000000000..b6403ed35a
--- /dev/null
+++ b/internal/appsec/README.md
@@ -0,0 +1,212 @@
+# Appsec Go Design
+
+This document describes the design of the `internal/appsec` package and everything under it. This package is responsible
+for securing the application by monitoring the operations that are executed by the application and applying actions in
+case a security threat is detected.
+
+Most of the work is to forward information to the module `github.com/DataDog/go-libddwaf` which contains the WAF
+(Web Application Firewall) engine. The WAF does most of the decision making about events and actions. Our goal is to
+connect the different parts of the application and the WAF engine while keeping up to date the various sources of
+configuration that the WAF engine uses.
+
+### Instrumentation Gateway: Dyngo
+
+Having the customer (or orchestrion) instrument their code is the hardest part of the job. That's why we want to provide
+the simplest API possible for them to use. This means losing the flexibility of enabling and disabling multiple
+products and features at runtime. Flexibility that we still want to provide to the customer, that's why behind every
+API entrypoint present in `dd-trace-go/contrib` that supports appsec is a call to the `internal/appsec/dyngo` package.
+
+```mermaid
+flowchart LR
+
+UserCode[User Code] --> Instrumentation --> IG{Instrumentation
Gateway} --> Listener
+```
+
+Dyngo is a context-scoped event listener system that provides a way to listen dynamically to events that are happening in
+the customer code and to react to configuration changes and hot-swap event listeners at runtime.
+
+```mermaid
+flowchart LR
+
+UserCode[contrib] --> appsec/emitter --> IG{dyngo} --> appsec/listener --> WAF
+appsec/remoteconfig -->|config change| IG
+appsec/config -->|config change| IG
+```
+
+### Operation definition requirements
+
+* Each operation must have a `Start*` and a `Finish` method covering calls to dyngo.
+* The content of the arguments and results should not require any external package, at most the standard library.
+
+Example operation:
+
+```go
+package main
+
+import (
+ "context"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+type (
+ ExampleOperation struct {
+ dyngo.Operation
+ }
+
+ ExampleOperationArgs struct {
+ Type string
+ }
+
+ ExampleOperationResult struct {
+ Code int
+ }
+)
+
+func (ExampleOperationArgs) IsArgOf(*ExampleOperation) {}
+func (ExampleOperationResult) IsResultOf(*ExampleOperation) {}
+
+func StartExampleOperation(ctx context.Context, args ExampleOperationArgs) *ExampleOperation {
+ parent, ok := dyngo.FromContext(ctx)
+ if !ok {
+ log.Error("No parent operation found")
+ return nil
+ }
+ op := &ExampleOperation{
+ Operation: dyngo.NewOperation(parent),
+ }
+ return dyngo.StartOperation(op, args)
+}
+
+func (op *ExampleOperation) Finish(result ExampleOperationResult) {
+ dyngo.FinishOperation(op, result)
+}
+```
+
+> [!CAUTION]
+> Importing external packages in the operation definition will probably cause circular dependencies. This is because
+> the operation definition can be used in the package it will instrument, and the package that will instrument it will
+> probably import the operation definition.
+
+### Operation Stack
+
+Current state of the possible operation stacks
+
+```mermaid
+flowchart TD
+
+ subgraph Top Level Operation
+ SES[trace.ServiceEntrySpanOperation]
+
+ Context[waf.ContextOperation]
+
+ HTTPH[httpsec.HandlerOperation]
+ GRPCH[grpcsec.HandlerOperation]
+ GQL[graphqlsec.RequestOperation]
+ end
+
+ subgraph HTTP
+ RequestBody([httpsec.MonitorRequestBody])
+ Roundtripper[httpsec.RoundTripOperation]
+ end
+
+ subgraph GRPC
+ RequestMessage([grpcsec.MonitorRequestMessage])
+ ResponseMessage([grpcsec.MonitorResponseMessage])
+ end
+
+ subgraph GraphQL
+ Exec[graphqlsec.ExecutionOperation]
+ Resolve[graphqlsec.ResolveOperation]
+ end
+
+ Code{User Code}
+
+ SES --> Context
+ Context --> HTTPH --> Code
+ Context --> GRPCH --> Code
+ Context --> GQL
+
+ GQL --> Exec --> Resolve --> Code
+
+ Code --> RequestBody
+
+ Code --> RequestMessage
+ Code --> ResponseMessage
+
+ Code --> Span[trace.SpanOperation]
+
+ Span --> Roundtripper
+ Span --> OS[ossec.OpenOperation]
+ Span --> SQL[sqlsec.SQLOperation]
+ Span --> User[usersec.UserOperation]
+```
+
+> [!IMPORTANT]
+> Please note that this is how the operations SHOULD be stacked. If the user code does not have a Top Level Operation
+> then nothing will be monitored. In this case an error log should be produced to thoroughly explain the issue to
+> the user.
+
+### Features
+
+Features represent an abstract feature added to the tracer by AppSec. They are the bridge between the configuration and
+its sources
+and the actual code that needs to be run in case of enablement or disablement of a feature. Features are divided into two
+parts:
+
+- The builder that should be a pure function that takes the configuration and returns a feature object.
+- The listeners that are methods of the feature object that are called when an event from the Instrumentation Gateway is
+ triggered.
+
+From there, at each configuration change from any config source, the AppSec module will rebuild the feature objects,
+register the listeners to the Instrumentation Gateway, and hot-swap the root level operation with the new one,
+consequently making the whole AppSec code atomic.
+
+Here is an example of what a system with only two features, GRPC and HTTP WAF Protection, would look like:
+
+```mermaid
+flowchart TD
+
+ subgraph HTTP Feature
+ HTTPListener
+ HTTPBuilder
+ end
+
+ subgraph GRPC Feature
+ GRPCBuilder
+ GRPCListener
+ end
+
+ subgraph Configuration
+ RemoteConfig
+ EnvConfig
+ ...
+ end
+
+ Configuration -->|config change| AppSec
+
+ AppSec -->|rebuild| HTTPBuilder
+ AppSec -->|rebuild| GRPCBuilder
+ HTTPBuilder -->|register HTTP Listener| IG
+ GRPCBuilder -->|register GRPC Listener| IG
+
+
+
+ IG{Instrumentation
Gateway} -->|Start httpsec.HandlerOperation| HTTPListener
+ IG{Instrumentation
Gateway} -->|Start grpcsec.HandlerOperation| GRPCListener
+```
+
+All currently available features are the following ones:
+
+| Feature Name | Description |
+|------------------------|--------------------------------------------------------|
+| HTTP WAF Protection | Protects HTTP requests from attacks |
+| GRPC WAF Protection | Protects GRPC requests from attacks |
+| GraphQL WAF Protection | Protects GraphQL requests from attacks |
+| SQL RASP | Runtime Application Self-Protection for SQL injections |
+| OS RASP | Runtime Application Self-Protection for LFI attacks |
+| HTTP RASP | Runtime Application Self-Protection for SSRF attacks |
+| User Security | User blocking and login failures/success events |
+| WAF Context | Setup of the request scoped context system of the WAF |
+| Tracing | Bridge between the tracer and AppSec features |
diff --git a/internal/appsec/_testlib/mockspan.go b/internal/appsec/_testlib/mockspan.go
deleted file mode 100644
index e9b46142b7..0000000000
--- a/internal/appsec/_testlib/mockspan.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package testlib
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
-)
-
-type MockSpan struct {
- Tags map[string]interface{}
- Finished bool
-}
-
-func (m *MockSpan) SetTag(key string, value interface{}) {
- if m.Tags == nil {
- m.Tags = make(map[string]interface{})
- }
- if key == ext.ManualKeep {
- if value == samplernames.AppSec {
- m.Tags[ext.ManualKeep] = true
- }
- } else {
- m.Tags[key] = value
- }
-}
-
-func (m *MockSpan) SetOperationName(_ string) {
- panic("unused")
-}
-
-func (m *MockSpan) BaggageItem(_ string) string {
- panic("unused")
-}
-
-func (m *MockSpan) SetBaggageItem(_, _ string) {
- panic("unused")
-}
-
-func (m *MockSpan) Finish(_ ...ddtrace.FinishOption) {
- m.Finished = true
-}
-
-func (m *MockSpan) Context() ddtrace.SpanContext {
- panic("unused")
-}
diff --git a/internal/appsec/_testlib/require.go b/internal/appsec/_testlib/require.go
deleted file mode 100644
index 77cde3e06c..0000000000
--- a/internal/appsec/_testlib/require.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package testlib
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-// RequireContainsMapSubset requires that the given map m contains the given subset map keys and values.
-func RequireContainsMapSubset(t *testing.T, m map[string]interface{}, subset map[string]interface{}) {
- for k, v := range subset {
- require.Contains(t, m, k)
- require.Equal(t, v, m[k])
- }
-}
diff --git a/internal/appsec/appsec.go b/internal/appsec/appsec.go
index ae68d0d668..0dc042caa1 100644
--- a/internal/appsec/appsec.go
+++ b/internal/appsec/appsec.go
@@ -9,13 +9,13 @@ import (
"fmt"
"sync"
- "github.com/DataDog/appsec-internal-go/limiter"
- appsecLog "github.com/DataDog/appsec-internal-go/log"
- waf "github.com/DataDog/go-libddwaf/v3"
-
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+ appsecLog "github.com/DataDog/appsec-internal-go/log"
+ waf "github.com/DataDog/go-libddwaf/v3"
)
// Enabled returns true when AppSec is up and running. Meaning that the appsec build tag is enabled, the env var
@@ -134,10 +134,10 @@ func setActiveAppSec(a *appsec) {
}
type appsec struct {
- cfg *config.Config
- limiter *limiter.TokenTicker
- wafHandle *waf.Handle
- started bool
+ cfg *config.Config
+ features []listener.Feature
+ featuresMu sync.Mutex
+ started bool
}
func newAppSec(cfg *config.Config) *appsec {
@@ -160,11 +160,8 @@ func (a *appsec) start(telemetry *appsecTelemetry) error {
log.Error("appsec: non-critical error while loading libddwaf: %v", err)
}
- a.limiter = limiter.NewTokenTicker(a.cfg.TraceRateLimit, a.cfg.TraceRateLimit)
- a.limiter.Start()
-
- // Register the WAF operation event listener
- if err := a.swapWAF(a.cfg.RulesManager.Latest); err != nil {
+ // Register dyngo listeners
+ if err := a.SwapRootOperation(); err != nil {
return err
}
@@ -193,15 +190,23 @@ func (a *appsec) stop() {
// Disable RC blocking first so that the following is guaranteed not to be concurrent anymore.
a.disableRCBlocking()
+ a.featuresMu.Lock()
+ defer a.featuresMu.Unlock()
+
// Disable the currently applied instrumentation
dyngo.SwapRootOperation(nil)
- if a.wafHandle != nil {
- a.wafHandle.Close()
- a.wafHandle = nil
- }
+
+ // Reset rules edits received from the remote configuration
+ // We skip the error because we can't do anything about and it was already logged in config.NewRulesManager
+ a.cfg.RulesManager, _ = config.NewRulesManager(nil)
+
// TODO: block until no more requests are using dyngo operations
- a.limiter.Stop()
+ for _, feature := range a.features {
+ feature.Stop()
+ }
+
+ a.features = nil
}
func init() {
diff --git a/internal/appsec/config/config.go b/internal/appsec/config/config.go
index e2a0b7736a..6ffcbafcf1 100644
--- a/internal/appsec/config/config.go
+++ b/internal/appsec/config/config.go
@@ -68,6 +68,30 @@ type Config struct {
// RC is the remote configuration client used to receive product configuration updates. Nil if RC is disabled (default)
RC *remoteconfig.ClientConfig
RASP bool
+ // SupportedAddresses are the addresses that the AppSec listener will bind to.
+ SupportedAddresses AddressSet
+}
+
+// AddressSet is a set of WAF addresses.
+type AddressSet map[string]struct{}
+
+func NewAddressSet(addrs []string) AddressSet {
+ set := make(AddressSet, len(addrs))
+ for _, addr := range addrs {
+ set[addr] = struct{}{}
+ }
+ return set
+}
+
+// AnyOf returns true if any of the addresses in the set are in the given list.
+func (set AddressSet) AnyOf(anyOf ...string) bool {
+ for _, addr := range anyOf {
+ if _, ok := set[addr]; ok {
+ return true
+ }
+ }
+
+ return false
}
// WithRCConfig sets the AppSec remote config client configuration to the specified cfg
@@ -105,7 +129,7 @@ func NewConfig() (*Config, error) {
return nil, err
}
- r, err := NewRulesManeger(rules)
+ r, err := NewRulesManager(rules)
if err != nil {
return nil, err
}
diff --git a/internal/appsec/config/rules_manager.go b/internal/appsec/config/rules_manager.go
index a61f68e804..e4e003eda1 100644
--- a/internal/appsec/config/rules_manager.go
+++ b/internal/appsec/config/rules_manager.go
@@ -8,6 +8,7 @@ package config
import (
"encoding/json"
"fmt"
+ "slices"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
@@ -28,24 +29,21 @@ type (
}
// RulesFragment can represent a full ruleset or a fragment of it.
RulesFragment struct {
- Version string `json:"version,omitempty"`
- Metadata any `json:"metadata,omitempty"`
- Rules []any `json:"rules,omitempty"`
- Overrides []any `json:"rules_override,omitempty"`
- Exclusions []any `json:"exclusions,omitempty"`
- RulesData []RuleDataEntry `json:"rules_data,omitempty"`
- Actions []any `json:"actions,omitempty"`
- CustomRules []any `json:"custom_rules,omitempty"`
- Processors []any `json:"processors,omitempty"`
- Scanners []any `json:"scanners,omitempty"`
+ Version string `json:"version,omitempty"`
+ Metadata any `json:"metadata,omitempty"`
+ Rules []any `json:"rules,omitempty"`
+ Overrides []any `json:"rules_override,omitempty"`
+ Exclusions []any `json:"exclusions,omitempty"`
+ ExclusionData []DataEntry `json:"exclusion_data,omitempty"`
+ RulesData []DataEntry `json:"rules_data,omitempty"`
+ Actions []any `json:"actions,omitempty"`
+ CustomRules []any `json:"custom_rules,omitempty"`
+ Processors []any `json:"processors,omitempty"`
+ Scanners []any `json:"scanners,omitempty"`
}
- // RuleDataEntry represents an entry in the "rules_data" top level field of a rules file
- RuleDataEntry rc.ASMDataRuleData
- // RulesData is a slice of RulesDataEntry
- RulesData struct {
- RulesData []RuleDataEntry `json:"rules_data"`
- }
+ // DataEntry represents an entry in the "rules_data" top level field of a rules file
+ DataEntry rc.ASMDataRuleData
)
// DefaultRulesFragment returns a RulesFragment created using the default static recommended rules
@@ -60,27 +58,20 @@ func DefaultRulesFragment() RulesFragment {
func (f *RulesFragment) clone() (clone RulesFragment) {
clone.Version = f.Version
clone.Metadata = f.Metadata
- clone.Overrides = cloneSlice(f.Overrides)
- clone.Exclusions = cloneSlice(f.Exclusions)
- clone.RulesData = cloneSlice(f.RulesData)
- clone.CustomRules = cloneSlice(f.CustomRules)
- clone.Processors = cloneSlice(f.Processors)
- clone.Scanners = cloneSlice(f.Scanners)
- // TODO (Francois Mazeau): copy more fields once we handle them
+ clone.Overrides = slices.Clone(f.Overrides)
+ clone.Exclusions = slices.Clone(f.Exclusions)
+ clone.ExclusionData = slices.Clone(f.ExclusionData)
+ clone.RulesData = slices.Clone(f.RulesData)
+ clone.CustomRules = slices.Clone(f.CustomRules)
+ clone.Processors = slices.Clone(f.Processors)
+ clone.Scanners = slices.Clone(f.Scanners)
return
}
-func cloneSlice[T any](slice []T) []T {
- // TODO: use slices.Clone once go1.21 is the min supported go runtime.
- clone := make([]T, len(slice), cap(slice))
- copy(clone, slice)
- return clone
-}
-
-// NewRulesManeger initializes and returns a new RulesManager using the provided rules.
+// NewRulesManager initializes and returns a new RulesManager using the provided rules.
// If no rules are provided (nil), the default rules are used instead.
// If the provided rules are invalid, an error is returned
-func NewRulesManeger(rules []byte) (*RulesManager, error) {
+func NewRulesManager(rules []byte) (*RulesManager, error) {
var f RulesFragment
if rules == nil {
f = DefaultRulesFragment()
@@ -135,6 +126,7 @@ func (r *RulesManager) Compile() {
for _, v := range r.Edits {
r.Latest.Overrides = append(r.Latest.Overrides, v.Overrides...)
r.Latest.Exclusions = append(r.Latest.Exclusions, v.Exclusions...)
+ r.Latest.ExclusionData = append(r.Latest.ExclusionData, v.ExclusionData...)
r.Latest.Actions = append(r.Latest.Actions, v.Actions...)
r.Latest.RulesData = append(r.Latest.RulesData, v.RulesData...)
r.Latest.CustomRules = append(r.Latest.CustomRules, v.CustomRules...)
diff --git a/internal/appsec/dyngo/operation_test.go b/internal/appsec/dyngo/operation_test.go
index 08f1150c9b..e5ce3f1c24 100644
--- a/internal/appsec/dyngo/operation_test.go
+++ b/internal/appsec/dyngo/operation_test.go
@@ -119,7 +119,7 @@ func TestUsage(t *testing.T) {
// HTTP body read listener appending the read results to a buffer
rawBodyListener := func(called *int, buf *[]byte) dyngo.EventListener[operation, HTTPHandlerArgs] {
return func(op operation, _ HTTPHandlerArgs) {
- dyngo.OnFinish(op, func(op operation, res BodyReadRes) {
+ dyngo.OnFinish(op, func(_ operation, res BodyReadRes) {
*called++
*buf = append(*buf, res.Buf...)
})
@@ -128,7 +128,7 @@ func TestUsage(t *testing.T) {
// Dummy waf looking for the string `attack` in HTTPHandlerArgs
wafListener := func(called *int, blocked *bool) dyngo.EventListener[operation, HTTPHandlerArgs] {
- return func(op operation, args HTTPHandlerArgs) {
+ return func(_ operation, args HTTPHandlerArgs) {
*called++
if strings.Contains(args.URL.RawQuery, "attack") {
@@ -148,14 +148,14 @@ func TestUsage(t *testing.T) {
jsonBodyValueListener := func(called *int, value *interface{}) dyngo.EventListener[operation, HTTPHandlerArgs] {
return func(op operation, _ HTTPHandlerArgs) {
- dyngo.On(op, func(op operation, v JSONParserArgs) {
+ dyngo.On(op, func(op operation, _ JSONParserArgs) {
didBodyRead := false
dyngo.On(op, func(_ operation, _ BodyReadArgs) {
didBodyRead = true
})
- dyngo.OnFinish(op, func(op operation, res JSONParserRes) {
+ dyngo.OnFinish(op, func(_ operation, res JSONParserRes) {
*called++
if !didBodyRead || res.Err != nil {
return
@@ -429,22 +429,22 @@ func TestSwapRootOperation(t *testing.T) {
dyngo.OnFinish(root, func(operation, MyOperationRes) { onFinishCalled++ })
dyngo.SwapRootOperation(root)
- runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(op dyngo.Operation) {})
+ runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(_ dyngo.Operation) {})
require.Equal(t, 1, onStartCalled)
require.Equal(t, 1, onFinishCalled)
dyngo.SwapRootOperation(dyngo.NewRootOperation())
- runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(op dyngo.Operation) {})
+ runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(_ dyngo.Operation) {})
require.Equal(t, 1, onStartCalled)
require.Equal(t, 1, onFinishCalled)
dyngo.SwapRootOperation(nil)
- runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(op dyngo.Operation) {})
+ runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(_ dyngo.Operation) {})
require.Equal(t, 1, onStartCalled)
require.Equal(t, 1, onFinishCalled)
dyngo.SwapRootOperation(root)
- runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(op dyngo.Operation) {})
+ runOperation(nil, MyOperationArgs{}, MyOperationRes{}, func(_ dyngo.Operation) {})
require.Equal(t, 2, onStartCalled)
require.Equal(t, 2, onFinishCalled)
}
diff --git a/internal/appsec/emitter/graphqlsec/execution.go b/internal/appsec/emitter/graphqlsec/execution.go
index 33bd4844fc..0599d5e18d 100644
--- a/internal/appsec/emitter/graphqlsec/execution.go
+++ b/internal/appsec/emitter/graphqlsec/execution.go
@@ -11,31 +11,55 @@ package graphqlsec
import (
"context"
+
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
)
+type (
+ ExecutionOperation struct {
+ dyngo.Operation
+ }
+
+ // ExecutionOperationArgs describes arguments passed to a GraphQL query operation.
+ ExecutionOperationArgs struct {
+ // Variables is the user-provided variables object for the query.
+ Variables map[string]any
+ // Query is the query that is being executed.
+ Query string
+ // OperationName is the user-provided operation name for the query.
+ OperationName string
+ }
+
+ ExecutionOperationRes struct {
+ // Data is the data returned from processing the GraphQL operation.
+ Data any
+ // Error is the error returned by processing the GraphQL Operation, if any.
+ Error error
+ }
+)
+
+// Finish the GraphQL query operation, along with the given results, and emit a finish event up in
+// the operation stack.
+func (q *ExecutionOperation) Finish(res ExecutionOperationRes) {
+ dyngo.FinishOperation(q, res)
+}
+
+func (ExecutionOperationArgs) IsArgOf(*ExecutionOperation) {}
+func (ExecutionOperationRes) IsResultOf(*ExecutionOperation) {}
+
// StartExecutionOperation starts a new GraphQL query operation, along with the given arguments, and
// emits a start event up in the operation stack. The operation is tracked on the returned context,
// and can be extracted later on using FromContext.
-func StartExecutionOperation(ctx context.Context, span trace.TagSetter, args types.ExecutionOperationArgs) (context.Context, *types.ExecutionOperation) {
- if span == nil {
- // The span may be nil (e.g: in case of GraphQL subscriptions with certian contribs). Child
- // operations might have spans however... and these should be used then.
- span = trace.NoopTagSetter{}
- }
-
+func StartExecutionOperation(ctx context.Context, args ExecutionOperationArgs) (context.Context, *ExecutionOperation) {
parent, ok := dyngo.FromContext(ctx)
if !ok {
log.Debug("appsec: StartExecutionOperation: no parent operation found in context")
}
- op := &types.ExecutionOperation{
+ op := &ExecutionOperation{
Operation: dyngo.NewOperation(parent),
- TagSetter: span,
}
return dyngo.StartAndRegisterOperation(ctx, op, args), op
diff --git a/internal/appsec/emitter/graphqlsec/init.go b/internal/appsec/emitter/graphqlsec/init.go
deleted file mode 100644
index a38d7932d7..0000000000
--- a/internal/appsec/emitter/graphqlsec/init.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package graphqlsec
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec"
-)
-
-func init() {
- appsec.AddWAFEventListener(graphqlsec.Install)
-}
diff --git a/internal/appsec/emitter/graphqlsec/request.go b/internal/appsec/emitter/graphqlsec/request.go
index a51624916e..70e1f6dff7 100644
--- a/internal/appsec/emitter/graphqlsec/request.go
+++ b/internal/appsec/emitter/graphqlsec/request.go
@@ -13,23 +13,56 @@ import (
"context"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
)
+type (
+ RequestOperation struct {
+ dyngo.Operation
+ // used in case we don't have a parent operation
+ *waf.ContextOperation
+ }
+
+ // RequestOperationArgs describes arguments passed to a GraphQL request.
+ RequestOperationArgs struct {
+ RawQuery string // The raw, not-yet-parsed GraphQL query
+ OperationName string // The user-provided operation name for the query
+ Variables map[string]any // The user-provided variables object for this request
+ }
+
+ RequestOperationRes struct {
+ // Data is the data returned from processing the GraphQL operation.
+ Data any
+ // Error is the error returned by processing the GraphQL Operation, if any.
+ Error error
+ }
+)
+
+// Finish the GraphQL query operation, along with the given results, and emit a finish event up in
+// the operation stack.
+func (op *RequestOperation) Finish(span trace.TagSetter, res RequestOperationRes) {
+ dyngo.FinishOperation(op, res)
+ if op.ContextOperation != nil {
+ op.ContextOperation.Finish(span)
+ }
+}
+
+func (RequestOperationArgs) IsArgOf(*RequestOperation) {}
+func (RequestOperationRes) IsResultOf(*RequestOperation) {}
+
// StartRequestOperation starts a new GraphQL request operation, along with the given arguments, and
// emits a start event up in the operation stack. The operation is usually linked to tge global root
// operation. The operation is tracked on the returned context, and can be extracted later on using
// FromContext.
-func StartRequestOperation(ctx context.Context, span trace.TagSetter, args types.RequestOperationArgs) (context.Context, *types.RequestOperation) {
- if span == nil {
- // The span may be nil (e.g: in case of GraphQL subscriptions with certian contribs)
- span = trace.NoopTagSetter{}
- }
-
- op := &types.RequestOperation{
- Operation: dyngo.NewOperation(nil),
- TagSetter: span,
+func StartRequestOperation(ctx context.Context, args RequestOperationArgs) (context.Context, *RequestOperation) {
+ parent, ok := dyngo.FromContext(ctx)
+ op := &RequestOperation{}
+ if !ok { // Usually we can find the HTTP Handler Operation as the parent but it's technically optional
+ op.ContextOperation, ctx = waf.StartContextOperation(ctx)
+ op.Operation = dyngo.NewOperation(op.ContextOperation)
+ } else {
+ op.Operation = dyngo.NewOperation(parent)
}
return dyngo.StartAndRegisterOperation(ctx, op, args), op
diff --git a/internal/appsec/emitter/graphqlsec/resolve.go b/internal/appsec/emitter/graphqlsec/resolve.go
index 4ef73248af..b7f5fe067b 100644
--- a/internal/appsec/emitter/graphqlsec/resolve.go
+++ b/internal/appsec/emitter/graphqlsec/resolve.go
@@ -7,25 +7,57 @@ package graphqlsec
import (
"context"
+
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
)
+type (
+ ResolveOperation struct {
+ dyngo.Operation
+ }
+
+ // ResolveOperationArgs describes arguments passed to a GraphQL field operation.
+ ResolveOperationArgs struct {
+ // TypeName is the name of the field's type
+ TypeName string
+ // FieldName is the name of the field
+ FieldName string
+ // Arguments is the arguments provided to the field resolver
+ Arguments map[string]any
+ // Trivial determines whether the resolution is trivial or not. Leave as false if undetermined.
+ Trivial bool
+ }
+
+ ResolveOperationRes struct {
+ // Data is the data returned from processing the GraphQL operation.
+ Data any
+ // Error is the error returned by processing the GraphQL Operation, if any.
+ Error error
+ }
+)
+
+// Finish the GraphQL Field operation, along with the given results, and emit a finish event up in
+// the operation stack.
+func (q *ResolveOperation) Finish(res ResolveOperationRes) {
+ dyngo.FinishOperation(q, res)
+}
+
+func (ResolveOperationArgs) IsArgOf(*ResolveOperation) {}
+func (ResolveOperationRes) IsResultOf(*ResolveOperation) {}
+
// StartResolveOperation starts a new GraphQL Resolve operation, along with the given arguments, and
// emits a start event up in the operation stack. The operation is tracked on the returned context,
// and can be extracted later on using FromContext.
-func StartResolveOperation(ctx context.Context, span trace.TagSetter, args types.ResolveOperationArgs) (context.Context, *types.ResolveOperation) {
+func StartResolveOperation(ctx context.Context, args ResolveOperationArgs) (context.Context, *ResolveOperation) {
parent, ok := dyngo.FromContext(ctx)
if !ok {
log.Debug("appsec: StartResolveOperation: no parent operation found in context")
}
- op := &types.ResolveOperation{
+ op := &ResolveOperation{
Operation: dyngo.NewOperation(parent),
- TagSetter: span,
}
return dyngo.StartAndRegisterOperation(ctx, op, args), op
}
diff --git a/internal/appsec/emitter/graphqlsec/types/types.go b/internal/appsec/emitter/graphqlsec/types/types.go
deleted file mode 100644
index d8b0d1948c..0000000000
--- a/internal/appsec/emitter/graphqlsec/types/types.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package types
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-)
-
-type (
- RequestOperation struct {
- dyngo.Operation
- trace.TagSetter
- trace.SecurityEventsHolder
- }
-
- // RequestOperationArgs describes arguments passed to a GraphQL request.
- RequestOperationArgs struct {
- RawQuery string // The raw, not-yet-parsed GraphQL query
- OperationName string // The user-provided operation name for the query
- Variables map[string]any // The user-provided variables object for this request
- }
-
- RequestOperationRes struct {
- // Data is the data returned from processing the GraphQL operation.
- Data any
- // Error is the error returned by processing the GraphQL Operation, if any.
- Error error
- }
-)
-
-// Finish the GraphQL query operation, along with the given results, and emit a finish event up in
-// the operation stack.
-func (q *RequestOperation) Finish(res RequestOperationRes) {
- dyngo.FinishOperation(q, res)
-}
-
-func (RequestOperationArgs) IsArgOf(*RequestOperation) {}
-func (RequestOperationRes) IsResultOf(*RequestOperation) {}
-
-type (
- ExecutionOperation struct {
- dyngo.Operation
- trace.TagSetter
- trace.SecurityEventsHolder
- }
-
- // ExecutionOperationArgs describes arguments passed to a GraphQL query operation.
- ExecutionOperationArgs struct {
- // Variables is the user-provided variables object for the query.
- Variables map[string]any
- // Query is the query that is being executed.
- Query string
- // OperationName is the user-provided operation name for the query.
- OperationName string
- }
-
- ExecutionOperationRes struct {
- // Data is the data returned from processing the GraphQL operation.
- Data any
- // Error is the error returned by processing the GraphQL Operation, if any.
- Error error
- }
-)
-
-// Finish the GraphQL query operation, along with the given results, and emit a finish event up in
-// the operation stack.
-func (q *ExecutionOperation) Finish(res ExecutionOperationRes) {
- dyngo.FinishOperation(q, res)
-}
-
-func (ExecutionOperationArgs) IsArgOf(*ExecutionOperation) {}
-func (ExecutionOperationRes) IsResultOf(*ExecutionOperation) {}
-
-type (
- ResolveOperation struct {
- dyngo.Operation
- trace.TagSetter
- trace.SecurityEventsHolder
- }
-
- // ResolveOperationArgs describes arguments passed to a GraphQL field operation.
- ResolveOperationArgs struct {
- // TypeName is the name of the field's type
- TypeName string
- // FieldName is the name of the field
- FieldName string
- // Arguments is the arguments provided to the field resolver
- Arguments map[string]any
- // Trivial determines whether the resolution is trivial or not. Leave as false if undetermined.
- Trivial bool
- }
-
- ResolveOperationRes struct {
- // Data is the data returned from processing the GraphQL operation.
- Data any
- // Error is the error returned by processing the GraphQL Operation, if any.
- Error error
- }
-)
-
-// Finish the GraphQL Field operation, along with the given results, and emit a finish event up in
-// the operation stack.
-func (q *ResolveOperation) Finish(res ResolveOperationRes) {
- dyngo.FinishOperation(q, res)
-}
-
-func (ResolveOperationArgs) IsArgOf(*ResolveOperation) {}
-func (ResolveOperationRes) IsResultOf(*ResolveOperation) {}
diff --git a/internal/appsec/emitter/grpcsec/grpc.go b/internal/appsec/emitter/grpcsec/grpc.go
index 15a7b57ae5..e6b28e3124 100644
--- a/internal/appsec/emitter/grpcsec/grpc.go
+++ b/internal/appsec/emitter/grpcsec/grpc.go
@@ -7,37 +7,110 @@
// defining an abstract run-time representation of gRPC handlers.
// gRPC integrations must use this package to enable AppSec features for gRPC,
// which listens to this package's operation events.
+//
+// Abstract gRPC server handler operation definitions. It is based on two
+// operations allowing to describe every type of RPC: the HandlerOperation type
+// which represents the RPC handler, and the ReceiveOperation type which
+// represents the messages the RPC handler receives during its lifetime.
+// This means that the ReceiveOperation(s) will happen within the
+// HandlerOperation.
+// Every type of RPC, unary, client streaming, server streaming, and
+// bidirectional streaming RPCs, can be all represented with a HandlerOperation
+// having one or several ReceiveOperation.
+// The send operation is not required for now and therefore not defined, which
+// means that server and bidirectional streaming RPCs currently have the same
+// run-time representation as unary and client streaming RPCs.
package grpcsec
import (
"context"
+ "sync/atomic"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
)
+type (
+ // HandlerOperation represents a gRPC server handler operation.
+ // It must be created with StartHandlerOperation() and finished with its
+ // Finish() method.
+ // Security events observed during the operation lifetime should be added
+ // to the operation using its AddSecurityEvent() method.
+ HandlerOperation struct {
+ dyngo.Operation
+ *waf.ContextOperation
+ }
+
+ // HandlerOperationArgs is the grpc handler arguments.
+ HandlerOperationArgs struct {
+ // Method is the gRPC method name.
+ // Corresponds to the address `grpc.server.method`.
+ Method string
+
+ // RPC metadata received by the gRPC handler.
+ // Corresponds to the address `grpc.server.request.metadata`.
+ Metadata map[string][]string
+
+ // RemoteAddr is the IP address of the client that initiated the gRPC request.
+ // May be used as the address `http.client_ip`.
+ RemoteAddr string
+ }
+
+ // HandlerOperationRes is the grpc handler results. Empty as of today.
+ HandlerOperationRes struct {
+ // Raw gRPC status code.
+ // Corresponds to the address `grpc.server.response.status`.
+ StatusCode int
+ }
+)
+
+func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {}
+func (HandlerOperationRes) IsResultOf(*HandlerOperation) {}
+
// StartHandlerOperation starts an gRPC server handler operation, along with the
// given arguments and parent operation, and emits a start event up in the
// operation stack. When parent is nil, the operation is linked to the global
// root operation.
-func StartHandlerOperation(ctx context.Context, args types.HandlerOperationArgs, parent dyngo.Operation, setup ...func(*types.HandlerOperation)) (context.Context, *types.HandlerOperation) {
- op := &types.HandlerOperation{
- Operation: dyngo.NewOperation(parent),
- TagsHolder: trace.NewTagsHolder(),
- }
- for _, cb := range setup {
- cb(op)
+func StartHandlerOperation(ctx context.Context, args HandlerOperationArgs) (context.Context, *HandlerOperation, *atomic.Pointer[actions.BlockGRPC]) {
+ wafOp, ctx := waf.StartContextOperation(ctx)
+ op := &HandlerOperation{
+ Operation: dyngo.NewOperation(wafOp),
+ ContextOperation: wafOp,
}
- return dyngo.StartAndRegisterOperation(ctx, op, args), op
+
+ var block atomic.Pointer[actions.BlockGRPC]
+ dyngo.OnData(op, func(err *actions.BlockGRPC) {
+ block.Store(err)
+ })
+
+ return dyngo.StartAndRegisterOperation(ctx, op, args), op, &block
+}
+
+// MonitorRequestMessage monitors the gRPC request message body as the WAF address `grpc.server.request.message`.
+func MonitorRequestMessage(ctx context.Context, msg any) error {
+ return waf.RunSimple(ctx,
+ addresses.NewAddressesBuilder().
+ WithGRPCRequestMessage(msg).
+ Build(),
+ "appsec: failed to monitor gRPC request message body")
+}
+
+// MonitorResponseMessage monitors the gRPC response message body as the WAF address `grpc.server.response.message`.
+func MonitorResponseMessage(ctx context.Context, msg any) error {
+ return waf.RunSimple(ctx,
+ addresses.NewAddressesBuilder().
+ WithGRPCResponseMessage(msg).
+ Build(),
+ "appsec: failed to monitor gRPC response message body")
+
}
-// StartReceiveOperation starts a receive operation of a gRPC handler, along
-// with the given arguments and parent operation, and emits a start event up in
-// the operation stack. When parent is nil, the operation is linked to the
-// global root operation.
-func StartReceiveOperation(args types.ReceiveOperationArgs, parent dyngo.Operation) types.ReceiveOperation {
- op := types.ReceiveOperation{Operation: dyngo.NewOperation(parent)}
- dyngo.StartOperation(op, args)
- return op
+// Finish the gRPC handler operation, along with the given results, and emit a
+// finish event up in the operation stack.
+func (op *HandlerOperation) Finish(span trace.TagSetter, res HandlerOperationRes) {
+ dyngo.FinishOperation(op, res)
+ op.ContextOperation.Finish(span)
}
diff --git a/internal/appsec/emitter/grpcsec/grpc_test.go b/internal/appsec/emitter/grpcsec/grpc_test.go
deleted file mode 100644
index c5d8d0916d..0000000000
--- a/internal/appsec/emitter/grpcsec/grpc_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package grpcsec_test
-
-import (
- "context"
- "fmt"
- "testing"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- grpcsec "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/types"
-
- "github.com/stretchr/testify/require"
-)
-
-type (
- rootArgs struct{}
- rootRes struct{}
-)
-
-func (rootArgs) IsArgOf(dyngo.Operation) {}
-func (rootRes) IsResultOf(dyngo.Operation) {}
-
-func TestUsage(t *testing.T) {
- testRPCRepresentation := func(expectedRecvOperation int) func(*testing.T) {
- return func(t *testing.T) {
- localRootOp := dyngo.NewRootOperation()
- dyngo.StartOperation(localRootOp, rootArgs{})
- defer dyngo.FinishOperation(localRootOp, rootRes{})
-
- var handlerStarted, handlerFinished, recvStarted, recvFinished int
- defer func() {
- require.Equal(t, 1, handlerStarted)
- require.Equal(t, 1, handlerFinished)
- require.Equal(t, expectedRecvOperation, recvStarted)
- require.Equal(t, expectedRecvOperation, recvFinished)
- }()
-
- const expectedMessageFormat = "message number %d"
-
- dyngo.On(localRootOp, func(handlerOp *types.HandlerOperation, args types.HandlerOperationArgs) {
- handlerStarted++
-
- dyngo.On(handlerOp, func(op types.ReceiveOperation, _ types.ReceiveOperationArgs) {
- recvStarted++
-
- dyngo.OnFinish(op, func(_ types.ReceiveOperation, res types.ReceiveOperationRes) {
- expectedMessage := fmt.Sprintf(expectedMessageFormat, recvStarted)
- require.Equal(t, expectedMessage, res.Message)
- recvFinished++
-
- handlerOp.AddSecurityEvents([]any{expectedMessage})
- })
- })
-
- dyngo.OnFinish(handlerOp, func(*types.HandlerOperation, types.HandlerOperationRes) { handlerFinished++ })
- })
-
- _, rpcOp := grpcsec.StartHandlerOperation(context.Background(), types.HandlerOperationArgs{}, localRootOp)
-
- for i := 1; i <= expectedRecvOperation; i++ {
- recvOp := grpcsec.StartReceiveOperation(types.ReceiveOperationArgs{}, rpcOp)
- recvOp.Finish(types.ReceiveOperationRes{Message: fmt.Sprintf(expectedMessageFormat, i)})
- }
-
- secEvents := rpcOp.Finish(types.HandlerOperationRes{})
-
- require.Len(t, secEvents, expectedRecvOperation)
- for i, e := range secEvents {
- require.Equal(t, fmt.Sprintf(expectedMessageFormat, i+1), e)
- }
- }
- }
-
- // Unary RPCs are represented by a single receive operation
- t.Run("unary-representation", testRPCRepresentation(1))
- // Client streaming RPCs are represented by many receive operations.
- t.Run("client-streaming-representation", testRPCRepresentation(10))
- // Server and bidirectional streaming RPCs cannot be tested for now because
- // the send operations are not used nor defined yet, server streaming RPCs
- // are currently represented like unary RPCs (1 client message, N server
- // messages), and bidirectional RPCs like client streaming RPCs (N client
- // messages, M server messages).
-}
diff --git a/internal/appsec/emitter/grpcsec/init.go b/internal/appsec/emitter/grpcsec/init.go
deleted file mode 100644
index f79eda99f8..0000000000
--- a/internal/appsec/emitter/grpcsec/init.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package grpcsec
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec"
-)
-
-func init() {
- appsec.AddWAFEventListener(grpcsec.Install)
-}
diff --git a/internal/appsec/emitter/grpcsec/types/types.go b/internal/appsec/emitter/grpcsec/types/types.go
deleted file mode 100644
index 449ce2fbc3..0000000000
--- a/internal/appsec/emitter/grpcsec/types/types.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package types
-
-import (
- "net/netip"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-)
-
-// Abstract gRPC server handler operation definitions. It is based on two
-// operations allowing to describe every type of RPC: the HandlerOperation type
-// which represents the RPC handler, and the ReceiveOperation type which
-// represents the messages the RPC handler receives during its lifetime.
-// This means that the ReceiveOperation(s) will happen within the
-// HandlerOperation.
-// Every type of RPC, unary, client streaming, server streaming, and
-// bidirectional streaming RPCs, can be all represented with a HandlerOperation
-// having one or several ReceiveOperation.
-// The send operation is not required for now and therefore not defined, which
-// means that server and bidirectional streaming RPCs currently have the same
-// run-time representation as unary and client streaming RPCs.
-type (
- // HandlerOperation represents a gRPC server handler operation.
- // It must be created with StartHandlerOperation() and finished with its
- // Finish() method.
- // Security events observed during the operation lifetime should be added
- // to the operation using its AddSecurityEvent() method.
- HandlerOperation struct {
- dyngo.Operation
- Error error
- trace.TagsHolder
- trace.SecurityEventsHolder
- }
-
- // HandlerOperationArgs is the grpc handler arguments.
- HandlerOperationArgs struct {
- // Method is the gRPC method name.
- // Corresponds to the address `grpc.server.method`.
- Method string
-
- // RPC metadata received by the gRPC handler.
- // Corresponds to the address `grpc.server.request.metadata`.
- Metadata map[string][]string
-
- // ClientIP is the IP address of the client that initiated the gRPC request.
- // Corresponds to the address `http.client_ip`.
- ClientIP netip.Addr
- }
-
- // HandlerOperationRes is the grpc handler results. Empty as of today.
- HandlerOperationRes struct{}
-
- // ReceiveOperation type representing an gRPC server handler operation. It must
- // be created with StartReceiveOperation() and finished with its Finish().
- ReceiveOperation struct {
- dyngo.Operation
- }
-
- // ReceiveOperationArgs is the gRPC handler receive operation arguments
- // Empty as of today.
- ReceiveOperationArgs struct{}
-
- // ReceiveOperationRes is the gRPC handler receive operation results which
- // contains the message the gRPC handler received.
- ReceiveOperationRes struct {
- // Message received by the gRPC handler.
- // Corresponds to the address `grpc.server.request.message`.
- Message interface{}
- }
-)
-
-// Finish the gRPC handler operation, along with the given results, and emit a
-// finish event up in the operation stack.
-func (op *HandlerOperation) Finish(res HandlerOperationRes) []any {
- dyngo.FinishOperation(op, res)
- return op.Events()
-}
-
-// Finish the gRPC handler operation, along with the given results, and emits a
-// finish event up in the operation stack.
-func (op ReceiveOperation) Finish(res ReceiveOperationRes) {
- dyngo.FinishOperation(op, res)
-}
-
-func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {}
-func (HandlerOperationRes) IsResultOf(*HandlerOperation) {}
-
-func (ReceiveOperationArgs) IsArgOf(ReceiveOperation) {}
-func (ReceiveOperationRes) IsResultOf(ReceiveOperation) {}
diff --git a/internal/appsec/emitter/httpsec/http.go b/internal/appsec/emitter/httpsec/http.go
index 37c56ef283..9f81cdc20e 100644
--- a/internal/appsec/emitter/httpsec/http.go
+++ b/internal/appsec/emitter/httpsec/http.go
@@ -12,169 +12,92 @@ package httpsec
import (
"context"
-
// Blank import needed to use embed for the default blocked response payloads
_ "embed"
"net/http"
- "strings"
+ "sync"
+ "sync/atomic"
- "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace"
-
- "github.com/DataDog/appsec-internal-go/netip"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
)
-// MonitorParsedBody starts and finishes the SDK body operation.
-// This function should not be called when AppSec is disabled in order to
-// get preciser error logs.
-func MonitorParsedBody(ctx context.Context, body any) error {
- parent, _ := dyngo.FromContext(ctx)
- if parent == nil {
- log.Error("appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context")
- return nil
+// HandlerOperation type representing an HTTP operation. It must be created with
+// StartOperation() and finished with its Finish().
+type (
+ HandlerOperation struct {
+ dyngo.Operation
+ *waf.ContextOperation
+ mu sync.RWMutex
}
- return ExecuteSDKBodyOperation(parent, types.SDKBodyOperationArgs{Body: body})
-}
-
-// ExecuteSDKBodyOperation starts and finishes the SDK Body operation by emitting a dyngo start and finish events
-// An error is returned if the body associated to that operation must be blocked
-func ExecuteSDKBodyOperation(parent dyngo.Operation, args types.SDKBodyOperationArgs) error {
- var err error
- op := &types.SDKBodyOperation{Operation: dyngo.NewOperation(parent)}
- dyngo.OnData(op, func(e *events.BlockingSecurityEvent) {
- err = e
- })
- dyngo.StartOperation(op, args)
- dyngo.FinishOperation(op, types.SDKBodyOperationRes{})
- return err
-}
-
-// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and
-// HandlerOperationRes.
-// The onBlock params are used to cleanup the context when needed.
-// It is a specific patch meant for Gin, for which we must abort the
-// context since it uses a queue of handlers and it's the only way to make
-// sure other queued handlers don't get executed.
-// TODO: this patch must be removed/improved when we rework our actions/operations system
-func WrapHandler(handler http.Handler, span ddtrace.Span, pathParams map[string]string, opts *Config) http.Handler {
- if opts == nil {
- opts = defaultWrapHandlerConfig
- } else if opts.ResponseHeaderCopier == nil {
- opts.ResponseHeaderCopier = defaultWrapHandlerConfig.ResponseHeaderCopier
+ // HandlerOperationArgs is the HTTP handler operation arguments.
+ HandlerOperationArgs struct {
+ Method string
+ RequestURI string
+ Host string
+ RemoteAddr string
+ Headers map[string][]string
+ Cookies map[string][]string
+ QueryParams map[string][]string
+ PathParams map[string]string
}
- trace.SetAppSecEnabledTags(span)
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ipTags, clientIP := httptrace.ClientIPTags(r.Header, true, r.RemoteAddr)
- log.Debug("appsec: http client ip detection returned `%s` given the http headers `%v`", clientIP, r.Header)
- trace.SetTags(span, ipTags)
-
- var bypassHandler http.Handler
- var blocking bool
- var stackTrace *stacktrace.Event
- args := MakeHandlerOperationArgs(r, clientIP, pathParams)
- ctx, op := StartOperation(r.Context(), args, func(op *types.Operation) {
- dyngo.OnData(op, func(a *sharedsec.HTTPAction) {
- blocking = true
- bypassHandler = a.Handler
- })
- dyngo.OnData(op, func(a *sharedsec.StackTraceAction) {
- stackTrace = &a.Event
- })
- })
- r = r.WithContext(ctx)
-
- defer func() {
- events := op.Finish(MakeHandlerOperationRes(w))
-
- // Execute the onBlock functions to make sure blocking works properly
- // in case we are instrumenting the Gin framework
- if blocking {
- op.SetTag(trace.BlockedRequestTag, true)
- for _, f := range opts.OnBlock {
- f()
- }
- }
-
- // Add stacktraces to the span, if any
- if stackTrace != nil {
- stacktrace.AddToSpan(span, stackTrace)
- }
+ // HandlerOperationRes is the HTTP handler operation results.
+ HandlerOperationRes struct {
+ Headers map[string][]string
+ StatusCode int
+ }
+)
- if bypassHandler != nil {
- bypassHandler.ServeHTTP(w, r)
- }
+func (HandlerOperationArgs) IsArgOf(*HandlerOperation) {}
+func (HandlerOperationRes) IsResultOf(*HandlerOperation) {}
- // Add the request headers span tags out of args.Headers instead of r.Header as it was normalized and some
- // extra headers have been added such as the Host header which is removed from the original Go request headers
- // map
- setRequestHeadersTags(span, args.Headers)
- setResponseHeadersTags(span, opts.ResponseHeaderCopier(w))
- trace.SetTags(span, op.Tags())
- if len(events) > 0 {
- httptrace.SetSecurityEventsTags(span, events)
- }
- }()
+func StartOperation(ctx context.Context, args HandlerOperationArgs) (*HandlerOperation, *atomic.Pointer[actions.BlockHTTP], context.Context) {
+ wafOp, ctx := waf.StartContextOperation(ctx)
+ op := &HandlerOperation{
+ Operation: dyngo.NewOperation(wafOp),
+ ContextOperation: wafOp,
+ }
- if bypassHandler != nil {
- handler = bypassHandler
- bypassHandler = nil
- }
- handler.ServeHTTP(w, r)
+ // We need to use an atomic pointer to store the action because the action may be created asynchronously in the future
+ var action atomic.Pointer[actions.BlockHTTP]
+ dyngo.OnData(op, func(a *actions.BlockHTTP) {
+ action.Store(a)
})
-}
-// MakeHandlerOperationArgs creates the HandlerOperationArgs value.
-func MakeHandlerOperationArgs(r *http.Request, clientIP netip.Addr, pathParams map[string]string) types.HandlerOperationArgs {
- cookies := makeCookies(r) // TODO(Julio-Guerra): avoid actively parsing the cookies thanks to dynamic instrumentation
- headers := headersRemoveCookies(r.Header)
- headers["host"] = []string{r.Host}
- return types.HandlerOperationArgs{
- Method: r.Method,
- RequestURI: r.RequestURI,
- Headers: headers,
- Cookies: cookies,
- Query: r.URL.Query(), // TODO(Julio-Guerra): avoid actively parsing the query values thanks to dynamic instrumentation
- PathParams: pathParams,
- ClientIP: clientIP,
- }
+ return op, &action, dyngo.StartAndRegisterOperation(ctx, op, args)
}
-// MakeHandlerOperationRes creates the HandlerOperationRes value.
-func MakeHandlerOperationRes(w http.ResponseWriter) types.HandlerOperationRes {
- var status int
- if mw, ok := w.(interface{ Status() int }); ok {
- status = mw.Status()
- }
- return types.HandlerOperationRes{Status: status, Headers: headersRemoveCookies(w.Header())}
+// Finish the HTTP handler operation and its children operations and write everything to the service entry span.
+func (op *HandlerOperation) Finish(res HandlerOperationRes, span ddtrace.Span) {
+ dyngo.FinishOperation(op, res)
+ op.ContextOperation.Finish(span)
}
-// Remove cookies from the request headers and return the map of headers
-// Used from `server.request.headers.no_cookies` and server.response.headers.no_cookies` addresses for the WAF
-func headersRemoveCookies(headers http.Header) map[string][]string {
- headersNoCookies := make(http.Header, len(headers))
- for k, v := range headers {
- k := strings.ToLower(k)
- if k == "cookie" {
- continue
- }
- headersNoCookies[k] = v
- }
- return headersNoCookies
+const monitorBodyErrorLog = `
+"appsec: parsed http body monitoring ignored: could not find the http handler instrumentation metadata in the request context:
+ the request handler is not being monitored by a middleware function or the provided context is not the expected request context
+`
+
+// MonitorParsedBody starts and finishes the SDK body operation.
+// This function should not be called when AppSec is disabled in order to
+// get more precise error logs.
+func MonitorParsedBody(ctx context.Context, body any) error {
+ return waf.RunSimple(ctx,
+ addresses.NewAddressesBuilder().
+ WithRequestBody(body).
+ Build(),
+ monitorBodyErrorLog,
+ )
}
// Return the map of parsed cookies if any and following the specification of
// the rule address `server.request.cookies`.
-func makeCookies(r *http.Request) map[string][]string {
- parsed := r.Cookies()
+func makeCookies(parsed []*http.Cookie) map[string][]string {
if len(parsed) == 0 {
return nil
}
@@ -185,18 +108,83 @@ func makeCookies(r *http.Request) map[string][]string {
return cookies
}
-// StartOperation starts an HTTP handler operation, along with the given
-// context and arguments and emits a start event up in the operation stack.
-// The operation is linked to the global root operation since an HTTP operation
-// is always expected to be first in the operation stack.
-func StartOperation(ctx context.Context, args types.HandlerOperationArgs, setup ...func(*types.Operation)) (context.Context, *types.Operation) {
- op := &types.Operation{
- Operation: dyngo.NewOperation(nil),
- TagsHolder: trace.NewTagsHolder(),
+// BeforeHandle contains the appsec functionality that should be executed before a http.Handler runs.
+// It returns the modified http.ResponseWriter and http.Request, an additional afterHandle function
+// that should be executed after the Handler runs, and a handled bool that indicates whether the request has been handled
+// or not - in case it was handled, the original handler should not run.
+func BeforeHandle(
+ w http.ResponseWriter,
+ r *http.Request,
+ span ddtrace.Span,
+ pathParams map[string]string,
+ opts *Config,
+) (http.ResponseWriter, *http.Request, func(), bool) {
+ if opts == nil {
+ opts = defaultWrapHandlerConfig
+ } else if opts.ResponseHeaderCopier == nil {
+ opts.ResponseHeaderCopier = defaultWrapHandlerConfig.ResponseHeaderCopier
}
- for _, cb := range setup {
- cb(op)
+
+ op, blockAtomic, ctx := StartOperation(r.Context(), HandlerOperationArgs{
+ Method: r.Method,
+ RequestURI: r.RequestURI,
+ Host: r.Host,
+ RemoteAddr: r.RemoteAddr,
+ Headers: r.Header,
+ Cookies: makeCookies(r.Cookies()),
+ QueryParams: r.URL.Query(),
+ PathParams: pathParams,
+ })
+ tr := r.WithContext(ctx)
+
+ afterHandle := func() {
+ var statusCode int
+ if res, ok := w.(interface{ Status() int }); ok {
+ statusCode = res.Status()
+ }
+ op.Finish(HandlerOperationRes{
+ Headers: opts.ResponseHeaderCopier(w),
+ StatusCode: statusCode,
+ }, span)
+
+ // Execute the onBlock functions to make sure blocking works properly
+ // in case we are instrumenting the Gin framework
+ if blockPtr := blockAtomic.Load(); blockPtr != nil {
+ for _, f := range opts.OnBlock {
+ f()
+ }
+
+ if blockPtr.Handler != nil {
+ blockPtr.Handler.ServeHTTP(w, tr)
+ }
+ }
}
- return dyngo.StartAndRegisterOperation(ctx, op, args), op
+ handled := false
+ if blockPtr := blockAtomic.Load(); blockPtr != nil && blockPtr.Handler != nil {
+ // handler is replaced
+ blockPtr.Handler.ServeHTTP(w, tr)
+ blockPtr.Handler = nil
+ handled = true
+ }
+ return w, tr, afterHandle, handled
+}
+
+// WrapHandler wraps the given HTTP handler with the abstract HTTP operation defined by HandlerOperationArgs and
+// HandlerOperationRes.
+// The onBlock params are used to cleanup the context when needed.
+// It is a specific patch meant for Gin, for which we must abort the
+// context since it uses a queue of handlers and it's the only way to make
+// sure other queued handlers don't get executed.
+// TODO: this patch must be removed/improved when we rework our actions/operations system
+func WrapHandler(handler http.Handler, span ddtrace.Span, pathParams map[string]string, opts *Config) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ tw, tr, afterHandle, handled := BeforeHandle(w, r, span, pathParams, opts)
+ defer afterHandle()
+ if handled {
+ return
+ }
+
+ handler.ServeHTTP(tw, tr)
+ })
}
diff --git a/internal/appsec/emitter/httpsec/init.go b/internal/appsec/emitter/httpsec/init.go
deleted file mode 100644
index 9f4db28ff2..0000000000
--- a/internal/appsec/emitter/httpsec/init.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package httpsec
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
-)
-
-func init() {
- appsec.AddWAFEventListener(httpsec.Install)
-}
diff --git a/internal/appsec/emitter/httpsec/roundtripper.go b/internal/appsec/emitter/httpsec/roundtripper.go
index 9df86576e6..8a7f1f2448 100644
--- a/internal/appsec/emitter/httpsec/roundtripper.go
+++ b/internal/appsec/emitter/httpsec/roundtripper.go
@@ -11,14 +11,31 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
var badInputContextOnce sync.Once
+type (
+ RoundTripOperation struct {
+ dyngo.Operation
+ }
+
+ // RoundTripOperationArgs is the round trip operation arguments.
+ RoundTripOperationArgs struct {
+ // URL corresponds to the address `server.io.net.url`.
+ URL string
+ }
+
+ // RoundTripOperationRes is the round trip operation results.
+ RoundTripOperationRes struct{}
+)
+
+func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {}
+func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {}
+
func ProtectRoundTrip(ctx context.Context, url string) error {
- opArgs := types.RoundTripOperationArgs{
+ opArgs := RoundTripOperationArgs{
URL: url,
}
@@ -32,7 +49,7 @@ func ProtectRoundTrip(ctx context.Context, url string) error {
return nil
}
- op := &types.RoundTripOperation{
+ op := &RoundTripOperation{
Operation: dyngo.NewOperation(parent),
}
@@ -43,7 +60,7 @@ func ProtectRoundTrip(ctx context.Context, url string) error {
})
dyngo.StartOperation(op, opArgs)
- dyngo.FinishOperation(op, types.RoundTripOperationRes{})
+ dyngo.FinishOperation(op, RoundTripOperationRes{})
if err != nil {
log.Debug("appsec: outgoing http request blocked by the WAF on URL: %s", url)
diff --git a/internal/appsec/emitter/httpsec/tags.go b/internal/appsec/emitter/httpsec/tags.go
deleted file mode 100644
index 0fc4357a9b..0000000000
--- a/internal/appsec/emitter/httpsec/tags.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package httpsec
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
-)
-
-// setRequestHeadersTags sets the AppSec-specific request headers span tags.
-func setRequestHeadersTags(span trace.TagSetter, headers map[string][]string) {
- setHeadersTags(span, "http.request.headers.", headers)
-}
-
-// setResponseHeadersTags sets the AppSec-specific response headers span tags.
-func setResponseHeadersTags(span trace.TagSetter, headers map[string][]string) {
- setHeadersTags(span, "http.response.headers.", headers)
-}
-
-// setHeadersTags sets the AppSec-specific headers span tags.
-func setHeadersTags(span trace.TagSetter, tagPrefix string, headers map[string][]string) {
- for h, v := range httptrace.NormalizeHTTPHeaders(headers) {
- span.SetTag(tagPrefix+h, v)
- }
-}
diff --git a/internal/appsec/emitter/httpsec/tags_test.go b/internal/appsec/emitter/httpsec/tags_test.go
deleted file mode 100644
index 963afe00d7..0000000000
--- a/internal/appsec/emitter/httpsec/tags_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package httpsec
-
-import (
- "fmt"
- "testing"
-
- testlib "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/_testlib"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestTags(t *testing.T) {
- for _, eventCase := range []struct {
- name string
- events []any
- expectedTag string
- expectedError bool
- }{
- {
- name: "no-event",
- events: nil,
- },
- {
- name: "one-event",
- events: []any{"one"},
- expectedTag: `{"triggers":["one"]}`,
- },
- {
- name: "two-events",
- events: []any{"one", "two"},
- expectedTag: `{"triggers":["one","two"]}`,
- },
- } {
- eventCase := eventCase
- for _, reqHeadersCase := range []struct {
- name string
- headers map[string][]string
- expectedTags map[string]interface{}
- }{
- {
- name: "zero-headers",
- },
- {
- name: "xff-header",
- headers: map[string][]string{
- "X-Forwarded-For": {"1.2.3.4", "4.5.6.7"},
- "my-header": {"something"},
- },
- expectedTags: map[string]interface{}{
- "http.request.headers.x-forwarded-for": "1.2.3.4,4.5.6.7",
- },
- },
- {
- name: "xff-header",
- headers: map[string][]string{
- "X-Forwarded-For": {"1.2.3.4"},
- "my-header": {"something"},
- },
- expectedTags: map[string]interface{}{
- "http.request.headers.x-forwarded-for": "1.2.3.4",
- },
- },
- {
- name: "no-monitored-headers",
- headers: map[string][]string{
- "my-header": {"something"},
- },
- },
- } {
- reqHeadersCase := reqHeadersCase
- for _, respHeadersCase := range []struct {
- name string
- headers map[string][]string
- expectedTags map[string]interface{}
- }{
- {
- name: "zero-headers",
- },
- {
- name: "ct-header",
- headers: map[string][]string{
- "Content-Type": {"application/json"},
- "my-header": {"something"},
- },
- expectedTags: map[string]interface{}{
- "http.response.headers.content-type": "application/json",
- },
- },
- {
- name: "no-monitored-headers",
- headers: map[string][]string{
- "my-header": {"something"},
- },
- },
- } {
- respHeadersCase := respHeadersCase
- t.Run(fmt.Sprintf("%s-%s-%s", eventCase.name, reqHeadersCase.name, respHeadersCase.name), func(t *testing.T) {
- var span testlib.MockSpan
- err := trace.SetEventSpanTags(&span, eventCase.events)
- if eventCase.expectedError {
- require.Error(t, err)
- return
- }
- require.NoError(t, err)
- setRequestHeadersTags(&span, reqHeadersCase.headers)
- setResponseHeadersTags(&span, respHeadersCase.headers)
-
- if eventCase.events != nil {
- testlib.RequireContainsMapSubset(t, span.Tags, map[string]interface{}{
- "_dd.appsec.json": eventCase.expectedTag,
- "manual.keep": true,
- "appsec.event": true,
- "_dd.origin": "appsec",
- })
- }
-
- if l := len(reqHeadersCase.expectedTags); l > 0 {
- testlib.RequireContainsMapSubset(t, span.Tags, reqHeadersCase.expectedTags)
- }
-
- if l := len(respHeadersCase.expectedTags); l > 0 {
- testlib.RequireContainsMapSubset(t, span.Tags, respHeadersCase.expectedTags)
- }
-
- require.False(t, span.Finished)
- })
- }
- }
- }
-}
diff --git a/internal/appsec/emitter/httpsec/types/types.go b/internal/appsec/emitter/httpsec/types/types.go
deleted file mode 100644
index 2ea8648b7c..0000000000
--- a/internal/appsec/emitter/httpsec/types/types.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package types
-
-import (
- "net/netip"
- "sync"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-)
-
-// Operation type representing an HTTP operation. It must be created with
-// StartOperation() and finished with its Finish().
-type (
- Operation struct {
- dyngo.Operation
- trace.TagsHolder
- trace.SecurityEventsHolder
- mu sync.RWMutex
- }
-
- // SDKBodyOperation type representing an SDK body
- SDKBodyOperation struct {
- dyngo.Operation
- }
-
- RoundTripOperation struct {
- dyngo.Operation
- }
-)
-
-// Finish the HTTP handler operation, along with the given results and emits a
-// finish event up in the operation stack.
-func (op *Operation) Finish(res HandlerOperationRes) []any {
- dyngo.FinishOperation(op, res)
- return op.Events()
-}
-
-// Abstract HTTP handler operation definition.
-type (
- // HandlerOperationArgs is the HTTP handler operation arguments.
- HandlerOperationArgs struct {
- // ClientIP corresponds to the address `http.client_ip`
- ClientIP netip.Addr
- // Headers corresponds to the address `server.request.headers.no_cookies`
- Headers map[string][]string
- // Cookies corresponds to the address `server.request.cookies`
- Cookies map[string][]string
- // Query corresponds to the address `server.request.query`
- Query map[string][]string
- // PathParams corresponds to the address `server.request.path_params`
- PathParams map[string]string
- // Method is the http method verb of the request, address is `server.request.method`
- Method string
- // RequestURI corresponds to the address `server.request.uri.raw`
- RequestURI string
- }
-
- // HandlerOperationRes is the HTTP handler operation results.
- HandlerOperationRes struct {
- Headers map[string][]string
- // Status corresponds to the address `server.response.status`.
- Status int
- }
-
- // SDKBodyOperationArgs is the SDK body operation arguments.
- SDKBodyOperationArgs struct {
- // Body corresponds to the address `server.request.body`.
- Body any
- }
-
- // SDKBodyOperationRes is the SDK body operation results.
- SDKBodyOperationRes struct{}
-
- // RoundTripOperationArgs is the round trip operation arguments.
- RoundTripOperationArgs struct {
- // URL corresponds to the address `server.io.net.url`.
- URL string
- }
-
- // RoundTripOperationRes is the round trip operation results.
- RoundTripOperationRes struct{}
-)
-
-// Finish finishes the SDKBody operation and emits a finish event
-func (op *SDKBodyOperation) Finish() {
- dyngo.FinishOperation(op, SDKBodyOperationRes{})
-}
-
-func (SDKBodyOperationArgs) IsArgOf(*SDKBodyOperation) {}
-func (SDKBodyOperationRes) IsResultOf(*SDKBodyOperation) {}
-
-func (HandlerOperationArgs) IsArgOf(*Operation) {}
-func (HandlerOperationRes) IsResultOf(*Operation) {}
-
-func (RoundTripOperationArgs) IsArgOf(*RoundTripOperation) {}
-func (RoundTripOperationRes) IsResultOf(*RoundTripOperation) {}
diff --git a/internal/appsec/emitter/sharedsec/shared.go b/internal/appsec/emitter/sharedsec/shared.go
deleted file mode 100644
index ec5b4a754f..0000000000
--- a/internal/appsec/emitter/sharedsec/shared.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2023 Datadog, Inc.
-
-package sharedsec
-
-import (
- "context"
- "reflect"
-
- "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
-)
-
-type (
- // UserIDOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single
- // call to ExecuteUserIDOperation
- UserIDOperation struct {
- dyngo.Operation
- }
- // UserIDOperationArgs is the user ID operation arguments.
- UserIDOperationArgs struct {
- UserID string
- }
- // UserIDOperationRes is the user ID operation results.
- UserIDOperationRes struct{}
-
- // OnUserIDOperationStart function type, called when a user ID
- // operation starts.
- OnUserIDOperationStart func(operation *UserIDOperation, args UserIDOperationArgs)
-)
-
-var userIDOperationArgsType = reflect.TypeOf((*UserIDOperationArgs)(nil)).Elem()
-
-// ExecuteUserIDOperation starts and finishes the UserID operation by emitting a dyngo start and finish events
-// An error is returned if the user associated to that operation must be blocked
-func ExecuteUserIDOperation(parent dyngo.Operation, args UserIDOperationArgs) error {
- var err error
- op := &UserIDOperation{Operation: dyngo.NewOperation(parent)}
- dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e })
- dyngo.StartOperation(op, args)
- dyngo.FinishOperation(op, UserIDOperationRes{})
- return err
-}
-
-// ListenedType returns the type a OnUserIDOperationStart event listener
-// listens to, which is the UserIDOperationStartArgs type.
-func (OnUserIDOperationStart) ListenedType() reflect.Type { return userIDOperationArgsType }
-
-// Call the underlying event listener function by performing the type-assertion
-// on v whose type is the one returned by ListenedType().
-func (f OnUserIDOperationStart) Call(op dyngo.Operation, v interface{}) {
- f(op.(*UserIDOperation), v.(UserIDOperationArgs))
-}
-
-// MonitorUser starts and finishes a UserID operation.
-// A call to the WAF is made to check the user ID and an error is returned if the
-// user should be blocked. The return value is nil otherwise.
-func MonitorUser(ctx context.Context, userID string) error {
- if parent, ok := dyngo.FromContext(ctx); ok {
- return ExecuteUserIDOperation(parent, UserIDOperationArgs{UserID: userID})
- }
- log.Error("appsec: user ID monitoring ignored: could not find the http handler instrumentation metadata in the request context: the request handler is not being monitored by a middleware function or the provided context is not the expected request context")
- return nil
-
-}
-
-func (UserIDOperationArgs) IsArgOf(*UserIDOperation) {}
-func (UserIDOperationRes) IsResultOf(*UserIDOperation) {}
diff --git a/internal/appsec/emitter/sqlsec/sql.go b/internal/appsec/emitter/sqlsec/sql.go
index a29ab3c66b..1c888d9f85 100644
--- a/internal/appsec/emitter/sqlsec/sql.go
+++ b/internal/appsec/emitter/sqlsec/sql.go
@@ -11,14 +11,30 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
var badInputContextOnce sync.Once
+type (
+ SQLOperation struct {
+ dyngo.Operation
+ }
+
+ SQLOperationArgs struct {
+ // Query corresponds to the address `server.db.statement`
+ Query string
+ // Driver corresponds to the address `server.db.system`
+ Driver string
+ }
+ SQLOperationRes struct{}
+)
+
+func (SQLOperationArgs) IsArgOf(*SQLOperation) {}
+func (SQLOperationRes) IsResultOf(*SQLOperation) {}
+
func ProtectSQLOperation(ctx context.Context, query, driver string) error {
- opArgs := types.SQLOperationArgs{
+ opArgs := SQLOperationArgs{
Query: query,
Driver: driver,
}
@@ -33,7 +49,7 @@ func ProtectSQLOperation(ctx context.Context, query, driver string) error {
return nil
}
- op := &types.SQLOperation{
+ op := &SQLOperation{
Operation: dyngo.NewOperation(parent),
}
@@ -44,7 +60,7 @@ func ProtectSQLOperation(ctx context.Context, query, driver string) error {
})
dyngo.StartOperation(op, opArgs)
- dyngo.FinishOperation(op, types.SQLOperationRes{})
+ dyngo.FinishOperation(op, SQLOperationRes{})
if err != nil {
log.Debug("appsec: outgoing SQL operation blocked by the WAF")
diff --git a/internal/appsec/emitter/sqlsec/types/sql.go b/internal/appsec/emitter/sqlsec/types/sql.go
deleted file mode 100644
index 379eb7f722..0000000000
--- a/internal/appsec/emitter/sqlsec/types/sql.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package types
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
-)
-
-type (
- SQLOperation struct {
- dyngo.Operation
- }
-
- SQLOperationArgs struct {
- // Query corresponds to the addres `server.db.statement`
- Query string
- // Driver corresponds to the addres `server.db.system`
- Driver string
- }
- SQLOperationRes struct{}
-)
-
-func (SQLOperationArgs) IsArgOf(*SQLOperation) {}
-func (SQLOperationRes) IsResultOf(*SQLOperation) {}
diff --git a/internal/appsec/emitter/trace/service_entry_span.go b/internal/appsec/emitter/trace/service_entry_span.go
new file mode 100644
index 0000000000..98e14b092f
--- /dev/null
+++ b/internal/appsec/emitter/trace/service_entry_span.go
@@ -0,0 +1,158 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package trace
+
+import (
+ "context"
+ "encoding/json"
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+type (
+ // ServiceEntrySpanOperation is a dyngo.Operation that holds the first span of a service, usually an HTTP or gRPC span.
+ ServiceEntrySpanOperation struct {
+ dyngo.Operation
+ tags map[string]any
+ jsonTags map[string]any
+ mu sync.Mutex
+ }
+
+ // ServiceEntrySpanArgs is the arguments for a ServiceEntrySpanOperation
+ ServiceEntrySpanArgs struct{}
+
+ // ServiceEntrySpanTag is a key value pair event that is used to tag a service entry span
+ ServiceEntrySpanTag struct {
+ Key string
+ Value any
+ }
+
+ // JSONServiceEntrySpanTag is a key value pair event that is used to tag a service entry span
+ // It will be serialized as JSON when added to the span
+ JSONServiceEntrySpanTag struct {
+ Key string
+ Value any
+ }
+
+ // ServiceEntrySpanTagsBulk is a bulk event that is used to send tags to a service entry span
+ ServiceEntrySpanTagsBulk struct {
+ Tags []JSONServiceEntrySpanTag
+ SerializableTags []JSONServiceEntrySpanTag
+ }
+)
+
+func (ServiceEntrySpanArgs) IsArgOf(*ServiceEntrySpanOperation) {}
+
+// SetTag adds the key/value pair to the tags to add to the service entry span
+func (op *ServiceEntrySpanOperation) SetTag(key string, value any) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ op.tags[key] = value
+}
+
+// SetSerializableTag adds the key/value pair to the tags to add to the service entry span.
+// The value MAY be serialized as JSON if necessary but simple types will not be serialized.
+func (op *ServiceEntrySpanOperation) SetSerializableTag(key string, value any) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ op.setSerializableTag(key, value)
+}
+
+// SetSerializableTags adds the key/value pairs to the tags to add to the service entry span.
+// Values MAY be serialized as JSON if necessary but simple types will not be serialized.
+func (op *ServiceEntrySpanOperation) SetSerializableTags(tags map[string]any) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ for key, value := range tags {
+ op.setSerializableTag(key, value)
+ }
+}
+
+func (op *ServiceEntrySpanOperation) setSerializableTag(key string, value any) {
+ switch value.(type) {
+ case string, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, bool:
+ op.tags[key] = value
+ default:
+ op.jsonTags[key] = value
+ }
+}
+
+// SetTags fills the span tags using the key/value pairs found in `tags`
+func (op *ServiceEntrySpanOperation) SetTags(tags map[string]any) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ for k, v := range tags {
+ op.tags[k] = v
+ }
+}
+
+// SetStringTags fills the span tags using the key/value pairs found in `tags`
+func (op *ServiceEntrySpanOperation) SetStringTags(tags map[string]string) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ for k, v := range tags {
+ op.tags[k] = v
+ }
+}
+
+// OnServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTag event
+func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagEvent(tag ServiceEntrySpanTag) {
+ op.SetTag(tag.Key, tag.Value)
+}
+
+// OnJSONServiceEntrySpanTagEvent is a callback that is called when a dyngo.OnData is triggered with a JSONServiceEntrySpanTag event
+func (op *ServiceEntrySpanOperation) OnJSONServiceEntrySpanTagEvent(tag JSONServiceEntrySpanTag) {
+ op.SetSerializableTag(tag.Key, tag.Value)
+}
+
+// OnServiceEntrySpanTagsBulkEvent is a callback that is called when a dyngo.OnData is triggered with a ServiceEntrySpanTagsBulk event
+func (op *ServiceEntrySpanOperation) OnServiceEntrySpanTagsBulkEvent(bulk ServiceEntrySpanTagsBulk) {
+ for _, v := range bulk.Tags {
+ op.SetTag(v.Key, v.Value)
+ }
+
+ for _, v := range bulk.SerializableTags {
+ op.SetSerializableTag(v.Key, v.Value)
+ }
+}
+
+// OnSpanTagEvent is a listener for SpanTag events.
+func (op *ServiceEntrySpanOperation) OnSpanTagEvent(tag SpanTag) {
+ op.SetTag(tag.Key, tag.Value)
+}
+
+func StartServiceEntrySpanOperation(ctx context.Context) (*ServiceEntrySpanOperation, context.Context) {
+ parent, _ := dyngo.FromContext(ctx)
+ op := &ServiceEntrySpanOperation{
+ Operation: dyngo.NewOperation(parent),
+ tags: make(map[string]any),
+ jsonTags: make(map[string]any),
+ }
+ return op, dyngo.StartAndRegisterOperation(ctx, op, ServiceEntrySpanArgs{})
+}
+
+func (op *ServiceEntrySpanOperation) Finish(span TagSetter) {
+ if _, ok := span.(*NoopTagSetter); ok { // If the span is a NoopTagSetter or is nil, we don't need to set any tags
+ return
+ }
+
+ op.mu.Lock()
+ defer op.mu.Unlock()
+
+ for k, v := range op.tags {
+ span.SetTag(k, v)
+ }
+
+ for k, v := range op.jsonTags {
+ strValue, err := json.Marshal(v)
+ if err != nil {
+ log.Debug("appsec: failed to marshal tag %s: %v", k, err)
+ }
+ span.SetTag(k, string(strValue))
+ }
+}
diff --git a/internal/appsec/emitter/trace/span.go b/internal/appsec/emitter/trace/span.go
new file mode 100644
index 0000000000..d6614f90de
--- /dev/null
+++ b/internal/appsec/emitter/trace/span.go
@@ -0,0 +1,67 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package trace
+
+import (
+ "context"
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+)
+
+type (
+ // SpanOperation is a dyngo.Operation that holds a ddtrace.Span.
+	// It is used as a middleware between the appsec code and the tracer code;
+	// hopefully some day this operation will create spans instead of simply using them.
+ SpanOperation struct {
+ dyngo.Operation
+ tags map[string]any
+ mu sync.Mutex
+ }
+
+ // SpanArgs is the arguments for a SpanOperation
+ SpanArgs struct{}
+
+ // SpanTag is a key value pair event that is used to tag the current span
+ SpanTag struct {
+ Key string
+ Value any
+ }
+)
+
+func (SpanArgs) IsArgOf(*SpanOperation) {}
+
+// SetTag adds the key/value pair to the tags to add to the span
+func (op *SpanOperation) SetTag(key string, value any) {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ op.tags[key] = value
+}
+
+// OnSpanTagEvent is a listener for SpanTag events.
+func (op *SpanOperation) OnSpanTagEvent(tag SpanTag) {
+ op.SetTag(tag.Key, tag.Value)
+}
+
+func StartSpanOperation(ctx context.Context) (*SpanOperation, context.Context) {
+ op := &SpanOperation{
+ tags: make(map[string]any),
+ }
+ return op, dyngo.StartAndRegisterOperation(ctx, op, SpanArgs{})
+}
+
+func (op *SpanOperation) Finish(span TagSetter) {
+ if _, ok := span.(*NoopTagSetter); ok { // If the span is a NoopTagSetter or is nil, we don't need to set any tags
+ return
+ }
+
+ op.mu.Lock()
+ defer op.mu.Unlock()
+
+ for k, v := range op.tags {
+ span.SetTag(k, v)
+ }
+}
diff --git a/internal/appsec/emitter/trace/tag_setter.go b/internal/appsec/emitter/trace/tag_setter.go
new file mode 100644
index 0000000000..a7f5bc1944
--- /dev/null
+++ b/internal/appsec/emitter/trace/tag_setter.go
@@ -0,0 +1,29 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package trace
+
+// TagSetter is the interface needed to set a span tag.
+type TagSetter interface {
+ SetTag(string, any)
+}
+
+// NoopTagSetter is a TagSetter that does nothing. Useful when no tracer
+// Span is available, but a TagSetter is assumed.
+type NoopTagSetter struct{}
+
+func (NoopTagSetter) SetTag(string, any) {
+ // Do nothing
+}
+
+type TestTagSetter map[string]any
+
+func (t TestTagSetter) SetTag(key string, value any) {
+ t[key] = value
+}
+
+func (t TestTagSetter) Tags() map[string]any {
+ return t
+}
diff --git a/internal/appsec/emitter/usersec/user.go b/internal/appsec/emitter/usersec/user.go
new file mode 100644
index 0000000000..50a4352e6d
--- /dev/null
+++ b/internal/appsec/emitter/usersec/user.go
@@ -0,0 +1,51 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package usersec
+
+import (
+ "context"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+)
+
+const errorLog = `
+appsec: user login monitoring ignored: could not find the http handler instrumentation metadata in the request context:
+ the request handler is not being monitored by a middleware function or the provided context is not the expected request context
+`
+
+type (
+ // UserLoginOperation type representing a call to appsec.SetUser(). It gets both created and destroyed in a single
+ // call to ExecuteUserIDOperation
+ UserLoginOperation struct {
+ dyngo.Operation
+ }
+ // UserLoginOperationArgs is the user ID operation arguments.
+ UserLoginOperationArgs struct{}
+
+ // UserLoginOperationRes is the user ID operation results.
+ UserLoginOperationRes struct {
+ UserID string
+ SessionID string
+ Success bool
+ }
+)
+
+func StartUserLoginOperation(ctx context.Context, args UserLoginOperationArgs) (*UserLoginOperation, *error) {
+ parent, _ := dyngo.FromContext(ctx)
+ op := &UserLoginOperation{Operation: dyngo.NewOperation(parent)}
+ var err error
+ dyngo.OnData(op, func(e *events.BlockingSecurityEvent) { err = e })
+ dyngo.StartOperation(op, args)
+ return op, &err
+}
+
+func (op *UserLoginOperation) Finish(args UserLoginOperationRes) {
+ dyngo.FinishOperation(op, args)
+}
+
+func (UserLoginOperationArgs) IsArgOf(*UserLoginOperation) {}
+func (UserLoginOperationRes) IsResultOf(*UserLoginOperation) {}
diff --git a/internal/appsec/emitter/waf/actions/actions.go b/internal/appsec/emitter/waf/actions/actions.go
new file mode 100644
index 0000000000..4eabcfaff6
--- /dev/null
+++ b/internal/appsec/emitter/waf/actions/actions.go
@@ -0,0 +1,56 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package actions
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+type (
+ // Action is a generic interface that represents any WAF action
+ Action interface {
+ EmitData(op dyngo.Operation)
+ }
+)
+
+type actionHandler func(map[string]any) []Action
+
+// actionHandlers is a map of action types to their respective handler functions
+// It is populated by the init functions of the actions packages
+var actionHandlers = map[string]actionHandler{}
+
+func registerActionHandler(aType string, handler actionHandler) {
+ if _, ok := actionHandlers[aType]; ok {
+ log.Warn("appsec: action type `%s` already registered", aType)
+ return
+ }
+ actionHandlers[aType] = handler
+}
+
+// SendActionEvents dispatches each entry of the actions map to its registered
+// handler and emits the resulting Action values to the operation's data
+// listeners. Unknown action types and malformed parameters are logged and skipped.
+func SendActionEvents(op dyngo.Operation, actions map[string]any) {
+	for aType, params := range actions {
+		log.Debug("appsec: processing %s action with params %v", aType, params)
+		paramsMap, ok := params.(map[string]any)
+		if !ok {
+			log.Debug("appsec: could not cast action params to map[string]any from %T", params)
+			continue
+		}
+
+		actionHandler, ok := actionHandlers[aType]
+		if !ok {
+			log.Debug("appsec: unknown action type `%s`", aType)
+			continue
+		}
+
+		for _, a := range actionHandler(paramsMap) {
+			a.EmitData(op)
+		}
+	}
+}
diff --git a/internal/appsec/emitter/sharedsec/actions_test.go b/internal/appsec/emitter/waf/actions/actions_test.go
similarity index 96%
rename from internal/appsec/emitter/sharedsec/actions_test.go
rename to internal/appsec/emitter/waf/actions/actions_test.go
index 7f40a4f26f..1e77c8e2d1 100644
--- a/internal/appsec/emitter/sharedsec/actions_test.go
+++ b/internal/appsec/emitter/waf/actions/actions_test.go
@@ -1,9 +1,9 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
-package sharedsec
+package actions
import (
"io"
@@ -161,11 +161,11 @@ func TestNewRedirectRequestAction(t *testing.T) {
mux.HandleFunc("/redirect-no-location", newRedirectRequestAction(303, "").ServeHTTP)
mux.HandleFunc("/redirect1", newRedirectRequestAction(http.StatusFound, "/redirect2").ServeHTTP)
mux.HandleFunc("/redirect2", newRedirectRequestAction(http.StatusFound, "/redirected").ServeHTTP)
- mux.HandleFunc("/redirected", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/redirected", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK) // Shouldn't matter since we write 302 before arriving here
w.Write([]byte("Redirected"))
})
- srv.Client().CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ srv.Client().CheckRedirect = func(_ *http.Request, via []*http.Request) error {
require.GreaterOrEqual(t, len(via), 1)
require.Equal(t, "/redirect1", via[0].URL.Path)
if len(via) == 2 {
@@ -206,7 +206,7 @@ func TestNewRedirectRequestAction(t *testing.T) {
// - empty location: revert to default blocking action instead
// - status code outside of [300, 399]: default to 303
t.Run("no-location", func(t *testing.T) {
- srv.Client().CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ srv.Client().CheckRedirect = func(_ *http.Request, _ []*http.Request) error {
return nil
}
req, err := http.NewRequest("POST", srv.URL+"/redirect-no-location", nil)
diff --git a/internal/appsec/emitter/sharedsec/actions.go b/internal/appsec/emitter/waf/actions/block.go
similarity index 53%
rename from internal/appsec/emitter/sharedsec/actions.go
rename to internal/appsec/emitter/waf/actions/block.go
index 49cd65a5d4..ae802b60bd 100644
--- a/internal/appsec/emitter/sharedsec/actions.go
+++ b/internal/appsec/emitter/waf/actions/block.go
@@ -1,22 +1,21 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2022 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
-package sharedsec
+package actions
import (
- _ "embed" // Blank import
+ _ "embed" // embed is used to embed the blocked-template.json and blocked-template.html files
"net/http"
"os"
"strings"
+ "github.com/mitchellh/mapstructure"
+
"gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace"
-
- "github.com/mitchellh/mapstructure"
)
// blockedTemplateJSON is the default JSON template used to write responses for blocked requests
@@ -43,38 +42,12 @@ func init() {
*template = t
}
}
-
}
+
+ registerActionHandler("block_request", NewBlockAction)
}
type (
- // Action is a generic interface that represents any WAF action
- Action interface {
- Blocking() bool
- EmitData(op dyngo.Operation)
- }
-
- // HTTPAction are actions that interact with an HTTP request flow (block, redirect...)
- HTTPAction struct {
- http.Handler
- }
- // GRPCAction are actions that interact with a GRPC request flow
- GRPCAction struct {
- GRPCWrapper
- }
- // StackTraceAction are actions that generate a stacktrace
- StackTraceAction struct {
- Event stacktrace.Event
- }
-
- // GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc)
- // that returns a status code and an error
- // TODO: rely on strongly typed actions (with the actual grpc types) by introducing WAF constructors
- // living in the contrib packages, along with their dependencies - something like `appsec.RegisterWAFConstructor(newGRPCWAF)`
- // Such constructors would receive the full appsec config and rules, so that they would be able to build
- // specific blocking actions.
- GRPCWrapper func() (uint32, error)
-
// blockActionParams are the dynamic parameters to be provided to a "block_request"
// action type upon invocation
blockActionParams struct {
@@ -84,40 +57,58 @@ type (
StatusCode int `mapstructure:"status_code"`
Type string `mapstructure:"type,omitempty"`
}
- // redirectActionParams are the dynamic parameters to be provided to a "redirect_request"
- // action type upon invocation
- redirectActionParams struct {
- Location string `mapstructure:"location,omitempty"`
- StatusCode int `mapstructure:"status_code"`
+ // GRPCWrapper is an opaque prototype abstraction for a gRPC handler (to avoid importing grpc)
+ // that returns a status code and an error
+ GRPCWrapper func() (uint32, error)
+
+ // BlockGRPC are actions that interact with a GRPC request flow
+ BlockGRPC struct {
+ GRPCWrapper
+ }
+
+ // BlockHTTP are actions that interact with an HTTP request flow
+ BlockHTTP struct {
+ http.Handler
}
)
-func (a *HTTPAction) Blocking() bool { return true }
-func (a *HTTPAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) }
+func (a *BlockGRPC) EmitData(op dyngo.Operation) {
+ dyngo.EmitData(op, a)
+ dyngo.EmitData(op, &events.BlockingSecurityEvent{})
+}
-func (a *GRPCAction) Blocking() bool { return true }
-func (a *GRPCAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) }
+func (a *BlockHTTP) EmitData(op dyngo.Operation) {
+ dyngo.EmitData(op, a)
+ dyngo.EmitData(op, &events.BlockingSecurityEvent{})
+}
-func (a *StackTraceAction) Blocking() bool { return false }
-func (a *StackTraceAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) }
+func newGRPCBlockRequestAction(status int) *BlockGRPC {
+ return &BlockGRPC{GRPCWrapper: newGRPCBlockHandler(status)}
+}
-// NewStackTraceAction creates an action for the "stacktrace" action type
-func NewStackTraceAction(params map[string]any) Action {
- id, ok := params["stack_id"]
- if !ok {
- log.Debug("appsec: could not read stack_id parameter for generate_stack action")
- return nil
+func newGRPCBlockHandler(status int) GRPCWrapper {
+ return func() (uint32, error) {
+ return uint32(status), &events.BlockingSecurityEvent{}
}
+}
- strID, ok := id.(string)
- if !ok {
- log.Debug("appsec: could not cast stacktrace ID to string")
- return nil
+func blockParamsFromMap(params map[string]any) (blockActionParams, error) {
+ grpcCode := 10
+ p := blockActionParams{
+ Type: "auto",
+ StatusCode: 403,
+ GRPCStatusCode: &grpcCode,
}
- event := stacktrace.NewEvent(stacktrace.ExploitEvent, stacktrace.WithID(strID))
+ if err := mapstructure.WeakDecode(params, &p); err != nil {
+ return p, err
+ }
- return &StackTraceAction{Event: *event}
+ if p.GRPCStatusCode == nil {
+ p.GRPCStatusCode = &grpcCode
+ }
+
+ return p, nil
}
// NewBlockAction creates an action for the "block_request" action type
@@ -133,36 +124,8 @@ func NewBlockAction(params map[string]any) []Action {
}
}
-// NewRedirectAction creates an action for the "redirect_request" action type
-func NewRedirectAction(params map[string]any) *HTTPAction {
- p, err := redirectParamsFromMap(params)
- if err != nil {
- log.Debug("appsec: couldn't decode redirect action parameters")
- return nil
- }
- return newRedirectRequestAction(p.StatusCode, p.Location)
-}
-
-func newHTTPBlockRequestAction(status int, template string) *HTTPAction {
- return &HTTPAction{Handler: newBlockHandler(status, template)}
-}
-
-func newGRPCBlockRequestAction(status int) *GRPCAction {
- return &GRPCAction{GRPCWrapper: newGRPCBlockHandler(status)}
-
-}
-
-func newRedirectRequestAction(status int, loc string) *HTTPAction {
- // Default to 303 if status is out of redirection codes bounds
- if status < 300 || status >= 400 {
- status = 303
- }
-
- // If location is not set we fall back on a default block action
- if loc == "" {
- return &HTTPAction{Handler: newBlockHandler(403, string(blockedTemplateJSON))}
- }
- return &HTTPAction{Handler: http.RedirectHandler(loc, status)}
+func newHTTPBlockRequestAction(status int, template string) *BlockHTTP {
+ return &BlockHTTP{Handler: newBlockHandler(status, template)}
}
// newBlockHandler creates, initializes and returns a new BlockRequestAction
@@ -190,40 +153,9 @@ func newBlockHandler(status int, template string) http.Handler {
}
func newBlockRequestHandler(status int, ct string, payload []byte) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", ct)
w.WriteHeader(status)
w.Write(payload)
})
}
-
-func newGRPCBlockHandler(status int) GRPCWrapper {
- return func() (uint32, error) {
- return uint32(status), &events.BlockingSecurityEvent{}
- }
-}
-
-func blockParamsFromMap(params map[string]any) (blockActionParams, error) {
- grpcCode := 10
- p := blockActionParams{
- Type: "auto",
- StatusCode: 403,
- GRPCStatusCode: &grpcCode,
- }
-
- if err := mapstructure.WeakDecode(params, &p); err != nil {
- return p, err
- }
-
- if p.GRPCStatusCode == nil {
- p.GRPCStatusCode = &grpcCode
- }
- return p, nil
-
-}
-
-func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) {
- var p redirectActionParams
- err := mapstructure.WeakDecode(params, &p)
- return p, err
-}
diff --git a/internal/appsec/emitter/sharedsec/blocked-template.html b/internal/appsec/emitter/waf/actions/blocked-template.html
similarity index 100%
rename from internal/appsec/emitter/sharedsec/blocked-template.html
rename to internal/appsec/emitter/waf/actions/blocked-template.html
diff --git a/internal/appsec/emitter/sharedsec/blocked-template.json b/internal/appsec/emitter/waf/actions/blocked-template.json
similarity index 78%
rename from internal/appsec/emitter/sharedsec/blocked-template.json
rename to internal/appsec/emitter/waf/actions/blocked-template.json
index 885d766c18..12ae29696f 100644
--- a/internal/appsec/emitter/sharedsec/blocked-template.json
+++ b/internal/appsec/emitter/waf/actions/blocked-template.json
@@ -1 +1 @@
-{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]}
+{"errors":[{"title":"You've been blocked","detail":"Sorry, you cannot access this page. Please contact the customer service team. Security provided by Datadog."}]}
\ No newline at end of file
diff --git a/internal/appsec/emitter/waf/actions/http_redirect.go b/internal/appsec/emitter/waf/actions/http_redirect.go
new file mode 100644
index 0000000000..3cdca4c818
--- /dev/null
+++ b/internal/appsec/emitter/waf/actions/http_redirect.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package actions
+
+import (
+ "net/http"
+
+ "github.com/mitchellh/mapstructure"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// redirectActionParams are the dynamic parameters to be provided to a "redirect_request"
+// action type upon invocation
+type redirectActionParams struct {
+ Location string `mapstructure:"location,omitempty"`
+ StatusCode int `mapstructure:"status_code"`
+}
+
+func init() {
+ registerActionHandler("redirect_request", NewRedirectAction)
+}
+
+func redirectParamsFromMap(params map[string]any) (redirectActionParams, error) {
+ var p redirectActionParams
+ err := mapstructure.WeakDecode(params, &p)
+ return p, err
+}
+
+func newRedirectRequestAction(status int, loc string) *BlockHTTP {
+ // Default to 303 if status is out of redirection codes bounds
+ if status < http.StatusMultipleChoices || status >= http.StatusBadRequest {
+ status = http.StatusSeeOther
+ }
+
+ // If location is not set we fall back on a default block action
+ if loc == "" {
+ return &BlockHTTP{Handler: newBlockHandler(http.StatusForbidden, string(blockedTemplateJSON))}
+ }
+ return &BlockHTTP{Handler: http.RedirectHandler(loc, status)}
+}
+
+// NewRedirectAction creates an action for the "redirect_request" action type
+func NewRedirectAction(params map[string]any) []Action {
+ p, err := redirectParamsFromMap(params)
+ if err != nil {
+ log.Debug("appsec: couldn't decode redirect action parameters")
+ return nil
+ }
+ return []Action{newRedirectRequestAction(p.StatusCode, p.Location)}
+}
diff --git a/internal/appsec/emitter/waf/actions/stacktrace.go b/internal/appsec/emitter/waf/actions/stacktrace.go
new file mode 100644
index 0000000000..47b4dd6078
--- /dev/null
+++ b/internal/appsec/emitter/waf/actions/stacktrace.go
@@ -0,0 +1,44 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package actions
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace"
+)
+
+func init() {
+ registerActionHandler("generate_stack", NewStackTraceAction)
+}
+
+// StackTraceAction are actions that generate a stacktrace
+type StackTraceAction struct {
+ Event *stacktrace.Event
+}
+
+func (a *StackTraceAction) EmitData(op dyngo.Operation) { dyngo.EmitData(op, a) }
+
+// NewStackTraceAction creates an action for the "generate_stack" action type
+func NewStackTraceAction(params map[string]any) []Action {
+ id, ok := params["stack_id"]
+ if !ok {
+ log.Debug("appsec: could not read stack_id parameter for generate_stack action")
+ return nil
+ }
+
+ strID, ok := id.(string)
+ if !ok {
+ log.Debug("appsec: could not cast stacktrace ID to string")
+ return nil
+ }
+
+ return []Action{
+ &StackTraceAction{
+ stacktrace.NewEvent(stacktrace.ExploitEvent, stacktrace.WithID(strID)),
+ },
+ }
+}
diff --git a/internal/appsec/emitter/waf/addresses/addresses.go b/internal/appsec/emitter/waf/addresses/addresses.go
new file mode 100644
index 0000000000..03163df23a
--- /dev/null
+++ b/internal/appsec/emitter/waf/addresses/addresses.go
@@ -0,0 +1,40 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package addresses
+
+const (
+ ServerRequestMethodAddr = "server.request.method"
+ ServerRequestRawURIAddr = "server.request.uri.raw"
+ ServerRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies"
+ ServerRequestCookiesAddr = "server.request.cookies"
+ ServerRequestQueryAddr = "server.request.query"
+ ServerRequestPathParamsAddr = "server.request.path_params"
+ ServerRequestBodyAddr = "server.request.body"
+ ServerResponseStatusAddr = "server.response.status"
+ ServerResponseHeadersNoCookiesAddr = "server.response.headers.no_cookies"
+
+ ClientIPAddr = "http.client_ip"
+
+ UserIDAddr = "usr.id"
+ UserSessionIDAddr = "usr.session_id"
+ UserLoginSuccessAddr = "server.business_logic.users.login.success"
+ UserLoginFailureAddr = "server.business_logic.users.login.failure"
+
+ ServerIoNetURLAddr = "server.io.net.url"
+ ServerIOFSFileAddr = "server.io.fs.file"
+ ServerDBStatementAddr = "server.db.statement"
+ ServerDBTypeAddr = "server.db.system"
+
+ GRPCServerMethodAddr = "grpc.server.method"
+ GRPCServerRequestMetadataAddr = "grpc.server.request.metadata"
+ GRPCServerRequestMessageAddr = "grpc.server.request.message"
+ GRPCServerResponseMessageAddr = "grpc.server.response.message"
+ GRPCServerResponseMetadataHeadersAddr = "grpc.server.response.metadata.headers"
+ GRPCServerResponseMetadataTrailersAddr = "grpc.server.response.metadata.trailers"
+ GRPCServerResponseStatusCodeAddr = "grpc.server.response.status"
+
+ GraphQLServerResolverAddr = "graphql.server.resolver"
+)
diff --git a/internal/appsec/emitter/waf/addresses/builder.go b/internal/appsec/emitter/waf/addresses/builder.go
new file mode 100644
index 0000000000..946a62bcf9
--- /dev/null
+++ b/internal/appsec/emitter/waf/addresses/builder.go
@@ -0,0 +1,243 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package addresses
+
+import (
+ "net/netip"
+ "strconv"
+
+ waf "github.com/DataDog/go-libddwaf/v3"
+)
+
+const contextProcessKey = "waf.context.processor"
+
+type RunAddressDataBuilder struct {
+ waf.RunAddressData
+}
+
+func NewAddressesBuilder() *RunAddressDataBuilder {
+ return &RunAddressDataBuilder{
+ RunAddressData: waf.RunAddressData{
+ Persistent: make(map[string]any, 1),
+ Ephemeral: make(map[string]any, 1),
+ },
+ }
+}
+
+func (b *RunAddressDataBuilder) WithMethod(method string) *RunAddressDataBuilder {
+ b.Persistent[ServerRequestMethodAddr] = method
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithRawURI(uri string) *RunAddressDataBuilder {
+ b.Persistent[ServerRequestRawURIAddr] = uri
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder {
+ if len(headers) == 0 {
+ headers = nil
+ }
+ b.Persistent[ServerRequestHeadersNoCookiesAddr] = headers
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithCookies(cookies map[string][]string) *RunAddressDataBuilder {
+ if len(cookies) == 0 {
+ cookies = nil
+ }
+ b.Persistent[ServerRequestCookiesAddr] = cookies
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithQuery(query map[string][]string) *RunAddressDataBuilder {
+ if len(query) == 0 {
+ query = nil
+ }
+ b.Persistent[ServerRequestQueryAddr] = query
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithPathParams(params map[string]string) *RunAddressDataBuilder {
+ if len(params) == 0 {
+ return b
+ }
+ b.Persistent[ServerRequestPathParamsAddr] = params
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithRequestBody(body any) *RunAddressDataBuilder {
+ if body == nil {
+ return b
+ }
+ b.Persistent[ServerRequestBodyAddr] = body
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithResponseStatus(status int) *RunAddressDataBuilder {
+ if status == 0 {
+ return b
+ }
+ b.Persistent[ServerResponseStatusAddr] = strconv.Itoa(status)
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithResponseHeadersNoCookies(headers map[string][]string) *RunAddressDataBuilder {
+ if len(headers) == 0 {
+ return b
+ }
+ b.Persistent[ServerResponseHeadersNoCookiesAddr] = headers
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithClientIP(ip netip.Addr) *RunAddressDataBuilder {
+ if !ip.IsValid() {
+ return b
+ }
+ b.Persistent[ClientIPAddr] = ip.String()
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithUserID(id string) *RunAddressDataBuilder {
+ if id == "" {
+ return b
+ }
+ b.Persistent[UserIDAddr] = id
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithUserSessionID(id string) *RunAddressDataBuilder {
+ if id == "" {
+ return b
+ }
+ b.Persistent[UserSessionIDAddr] = id
+ return b
+
+}
+
+func (b *RunAddressDataBuilder) WithUserLoginSuccess() *RunAddressDataBuilder {
+ b.Persistent[UserLoginSuccessAddr] = nil
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithUserLoginFailure() *RunAddressDataBuilder {
+ b.Persistent[UserLoginFailureAddr] = nil
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithFilePath(file string) *RunAddressDataBuilder {
+ if file == "" {
+ return b
+ }
+ b.Ephemeral[ServerIOFSFileAddr] = file
+ b.Scope = waf.RASPScope
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithURL(url string) *RunAddressDataBuilder {
+ if url == "" {
+ return b
+ }
+ b.Ephemeral[ServerIoNetURLAddr] = url
+ b.Scope = waf.RASPScope
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithDBStatement(statement string) *RunAddressDataBuilder {
+ if statement == "" {
+ return b
+ }
+ b.Ephemeral[ServerDBStatementAddr] = statement
+ b.Scope = waf.RASPScope
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithDBType(driver string) *RunAddressDataBuilder {
+ if driver == "" {
+ return b
+ }
+ b.Ephemeral[ServerDBTypeAddr] = driver
+ b.Scope = waf.RASPScope
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCMethod(method string) *RunAddressDataBuilder {
+ if method == "" {
+ return b
+ }
+ b.Persistent[GRPCServerMethodAddr] = method
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCRequestMessage(message any) *RunAddressDataBuilder {
+ if message == nil {
+ return b
+ }
+ b.Ephemeral[GRPCServerRequestMessageAddr] = message
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCRequestMetadata(metadata map[string][]string) *RunAddressDataBuilder {
+ if len(metadata) == 0 {
+ return b
+ }
+ b.Persistent[GRPCServerRequestMetadataAddr] = metadata
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCResponseMessage(message any) *RunAddressDataBuilder {
+ if message == nil {
+ return b
+ }
+ b.Ephemeral[GRPCServerResponseMessageAddr] = message
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCResponseMetadataHeaders(headers map[string][]string) *RunAddressDataBuilder {
+ if len(headers) == 0 {
+ return b
+ }
+ b.Persistent[GRPCServerResponseMetadataHeadersAddr] = headers
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCResponseMetadataTrailers(trailers map[string][]string) *RunAddressDataBuilder {
+ if len(trailers) == 0 {
+ return b
+ }
+ b.Persistent[GRPCServerResponseMetadataTrailersAddr] = trailers
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGRPCResponseStatusCode(status int) *RunAddressDataBuilder {
+ if status == 0 {
+ return b
+ }
+ b.Persistent[GRPCServerResponseStatusCodeAddr] = strconv.Itoa(status)
+ return b
+}
+
+func (b *RunAddressDataBuilder) WithGraphQLResolver(fieldName string, args map[string]any) *RunAddressDataBuilder {
+ if _, ok := b.Ephemeral[GraphQLServerResolverAddr]; !ok {
+ b.Ephemeral[GraphQLServerResolverAddr] = map[string]any{}
+ }
+
+ b.Ephemeral[GraphQLServerResolverAddr].(map[string]any)[fieldName] = args
+ return b
+}
+
+func (b *RunAddressDataBuilder) ExtractSchema() *RunAddressDataBuilder {
+ if _, ok := b.Persistent[contextProcessKey]; !ok {
+ b.Persistent[contextProcessKey] = map[string]bool{}
+ }
+
+ b.Persistent[contextProcessKey].(map[string]bool)["extract-schema"] = true
+ return b
+}
+
+func (b *RunAddressDataBuilder) Build() waf.RunAddressData {
+ return b.RunAddressData
+}
diff --git a/internal/appsec/emitter/waf/context.go b/internal/appsec/emitter/waf/context.go
new file mode 100644
index 0000000000..698e721880
--- /dev/null
+++ b/internal/appsec/emitter/waf/context.go
@@ -0,0 +1,160 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package waf
+
+import (
+ "context"
+ "maps"
+ "slices"
+ "sync"
+ "sync/atomic"
+
+ "github.com/DataDog/appsec-internal-go/limiter"
+ waf "github.com/DataDog/go-libddwaf/v3"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+)
+
+type (
+ ContextOperation struct {
+ dyngo.Operation
+ *trace.ServiceEntrySpanOperation
+
+ // context is an atomic pointer to the current WAF context.
+ // Makes sure the calls to context.Run are safe.
+ context atomic.Pointer[waf.Context]
+ // limiter comes from the WAF feature and is used to limit the number of events as a whole.
+ limiter limiter.Limiter
+ // events is where we store WAF events received from the WAF over the course of the request.
+ events []any
+ // stacks is where we store stack traces received from the WAF over the course of the request.
+ stacks []*stacktrace.Event
+ // derivatives is where we store any span tags generated by the WAF over the course of the request.
+ derivatives map[string]any
+ // supportedAddresses is the set of addresses supported by the WAF.
+ supportedAddresses config.AddressSet
+ // mu protects the events, stacks, and derivatives, supportedAddresses slices.
+ mu sync.Mutex
+ // logOnce is used to log a warning once when a request has too many WAF events via the built-in limiter or the max value.
+ logOnce sync.Once
+ }
+
+ ContextArgs struct{}
+
+ ContextRes struct{}
+
+ // RunEvent is the type of event that should be emitted to child operations to run the WAF
+ RunEvent struct {
+ waf.RunAddressData
+ dyngo.Operation
+ }
+)
+
+func (ContextArgs) IsArgOf(*ContextOperation) {}
+func (ContextRes) IsResultOf(*ContextOperation) {}
+
+func StartContextOperation(ctx context.Context) (*ContextOperation, context.Context) {
+ entrySpanOp, ctx := trace.StartServiceEntrySpanOperation(ctx)
+ op := &ContextOperation{
+ Operation: dyngo.NewOperation(entrySpanOp),
+ ServiceEntrySpanOperation: entrySpanOp,
+ }
+ return op, dyngo.StartAndRegisterOperation(ctx, op, ContextArgs{})
+}
+
+func (op *ContextOperation) Finish(span trace.TagSetter) {
+ dyngo.FinishOperation(op, ContextRes{})
+ op.ServiceEntrySpanOperation.Finish(span)
+}
+
+func (op *ContextOperation) SwapContext(ctx *waf.Context) *waf.Context {
+ return op.context.Swap(ctx)
+}
+
+func (op *ContextOperation) SetLimiter(limiter limiter.Limiter) {
+ op.limiter = limiter
+}
+
+func (op *ContextOperation) AddEvents(events ...any) {
+ if len(events) == 0 {
+ return
+ }
+
+ if !op.limiter.Allow() {
+ log.Warn("appsec: too many WAF events, stopping further reporting")
+ return
+ }
+
+ op.mu.Lock()
+ defer op.mu.Unlock()
+
+ const maxWAFEventsPerRequest = 10
+ if len(op.events) >= maxWAFEventsPerRequest {
+ op.logOnce.Do(func() {
+ log.Warn("appsec: ignoring new WAF event due to the maximum number of security events per request was reached")
+ })
+ return
+ }
+
+ op.events = append(op.events, events...)
+}
+
+func (op *ContextOperation) AddStackTraces(stacks ...*stacktrace.Event) {
+ if len(stacks) == 0 {
+ return
+ }
+
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ op.stacks = append(op.stacks, stacks...)
+}
+
+func (op *ContextOperation) AbsorbDerivatives(derivatives map[string]any) {
+ if len(derivatives) == 0 {
+ return
+ }
+
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ if op.derivatives == nil {
+ op.derivatives = make(map[string]any)
+ }
+
+ for k, v := range derivatives {
+ op.derivatives[k] = v
+ }
+}
+
+func (op *ContextOperation) Derivatives() map[string]any {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ return maps.Clone(op.derivatives)
+}
+
+func (op *ContextOperation) Events() []any {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ return slices.Clone(op.events)
+}
+
+func (op *ContextOperation) StackTraces() []*stacktrace.Event {
+ op.mu.Lock()
+ defer op.mu.Unlock()
+ return slices.Clone(op.stacks)
+}
+
+func (op *ContextOperation) OnEvent(event RunEvent) {
+ op.Run(event.Operation, event.RunAddressData)
+}
+
+func (op *ContextOperation) SetSupportedAddresses(addrs config.AddressSet) {
+ op.supportedAddresses = addrs
+}
diff --git a/internal/appsec/emitter/waf/run.go b/internal/appsec/emitter/waf/run.go
new file mode 100644
index 0000000000..a77abd5b20
--- /dev/null
+++ b/internal/appsec/emitter/waf/run.go
@@ -0,0 +1,78 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package waf
+
+import (
+ "context"
+ "errors"
+ "maps"
+
+ waf "github.com/DataDog/go-libddwaf/v3"
+ wafErrors "github.com/DataDog/go-libddwaf/v3/errors"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+// Run runs the WAF with the given address data and sends the results to the event receiver
+// the event receiver can be the same os the method receiver but not always
+// the event receiver is the one that will receive the actions events generated by the WAF
+func (op *ContextOperation) Run(eventReceiver dyngo.Operation, addrs waf.RunAddressData) {
+ ctx := op.context.Load()
+ if ctx == nil { // Context was closed concurrently
+ return
+ }
+
+ // Remove unsupported addresses in case the listener was registered but some addresses are still unsupported
+ // Technically the WAF does this step for us but doing this check before calling the WAF makes us skip encoding huge
+ // values that may be discarded by the WAF afterward.
+ // e.g. gRPC response body address that is not in the default ruleset but will still be sent to the WAF and may be huge
+ for _, addrType := range []map[string]any{addrs.Persistent, addrs.Ephemeral} {
+ maps.DeleteFunc(addrType, func(key string, _ any) bool {
+ _, ok := op.supportedAddresses[key]
+ return !ok
+ })
+ }
+
+ result, err := ctx.Run(addrs)
+ if errors.Is(err, wafErrors.ErrTimeout) {
+ log.Debug("appsec: WAF timeout value reached: %v", err)
+ } else if err != nil {
+ log.Error("appsec: unexpected WAF error: %v", err)
+ }
+
+ op.AddEvents(result.Events...)
+ op.AbsorbDerivatives(result.Derivatives)
+
+ actions.SendActionEvents(eventReceiver, result.Actions)
+
+ if result.HasEvents() {
+ log.Debug("appsec: WAF detected a suspicious event")
+ }
+}
+
+// RunSimple runs the WAF with the given address data and returns an error that should be forwarded to the caller
+func RunSimple(ctx context.Context, addrs waf.RunAddressData, errorLog string) error {
+ parent, _ := dyngo.FromContext(ctx)
+ if parent == nil {
+ log.Error(errorLog)
+ return nil
+ }
+
+ var err error
+ op := dyngo.NewOperation(parent)
+ dyngo.OnData(op, func(e *events.BlockingSecurityEvent) {
+ err = e
+ })
+ dyngo.EmitData(op, RunEvent{
+ Operation: op,
+ RunAddressData: addrs,
+ })
+ return err
+}
diff --git a/internal/appsec/features.go b/internal/appsec/features.go
new file mode 100644
index 0000000000..ca286de742
--- /dev/null
+++ b/internal/appsec/features.go
@@ -0,0 +1,81 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package appsec
+
+import (
+ "errors"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/graphqlsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/grpcsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/usersec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+var features = []listener.NewFeature{
+ trace.NewAppsecSpanTransport,
+ waf.NewWAFFeature,
+ httpsec.NewHTTPSecFeature,
+ grpcsec.NewGRPCSecFeature,
+ graphqlsec.NewGraphQLSecFeature,
+ usersec.NewUserSecFeature,
+ sqlsec.NewSQLSecFeature,
+ ossec.NewOSSecFeature,
+ httpsec.NewSSRFProtectionFeature,
+}
+
+func (a *appsec) SwapRootOperation() error {
+ newRoot := dyngo.NewRootOperation()
+ newFeatures := make([]listener.Feature, 0, len(features))
+ var featureErrors []error
+ for _, newFeature := range features {
+ feature, err := newFeature(a.cfg, newRoot)
+ if err != nil {
+ featureErrors = append(featureErrors, err)
+ continue
+ }
+
+ // If error is nil and feature is nil, it means the feature did not activate itself
+ if feature == nil {
+ continue
+ }
+
+ newFeatures = append(newFeatures, feature)
+ }
+
+ err := errors.Join(featureErrors...)
+ if err != nil {
+ for _, feature := range newFeatures {
+ feature.Stop()
+ }
+ return err
+ }
+
+ a.featuresMu.Lock()
+ defer a.featuresMu.Unlock()
+
+ oldFeatures := a.features
+ a.features = newFeatures
+
+ log.Debug("appsec: stopping the following features: %v", oldFeatures)
+ log.Debug("appsec: starting the following features: %v", newFeatures)
+
+ dyngo.SwapRootOperation(newRoot)
+
+ log.Debug("appsec: swapped root operation")
+
+ for _, oldFeature := range oldFeatures {
+ oldFeature.Stop()
+ }
+
+ return nil
+}
diff --git a/internal/appsec/listener/feature.go b/internal/appsec/listener/feature.go
new file mode 100644
index 0000000000..6f07e2cd56
--- /dev/null
+++ b/internal/appsec/listener/feature.go
@@ -0,0 +1,24 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package listener
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+)
+
+// Feature is an interface that represents a feature that can be started and stopped.
+type Feature interface {
+ // String should return a user-friendly name for the feature.
+ String() string
+ // Stop stops the feature.
+ Stop()
+}
+
+// NewFeature is a function that creates a new feature.
+// The error returned will be fatal for the application if not nil.
+// If both the feature and the error are nil, the feature will be considered inactive.
+type NewFeature func(*config.Config, dyngo.Operation) (Feature, error)
diff --git a/internal/appsec/listener/graphqlsec/graphql.go b/internal/appsec/listener/graphqlsec/graphql.go
index d12f224b8f..cb97baf3ea 100644
--- a/internal/appsec/listener/graphqlsec/graphql.go
+++ b/internal/appsec/listener/graphqlsec/graphql.go
@@ -1,130 +1,43 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
package graphqlsec
import (
- "sync"
-
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec/types"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/graphqlsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
- shared "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
-)
-
-// GraphQL rule addresses currently supported by the WAF
-const (
- graphQLServerResolverAddr = "graphql.server.resolver"
)
-// List of GraphQL rule addresses currently supported by the WAF
-var supportedAddresses = listener.AddressSet{
- graphQLServerResolverAddr: {},
- httpsec.ServerIoNetURLAddr: {},
-}
-
-// Install registers the GraphQL WAF Event Listener on the given root operation.
-func Install(wafHandle *waf.Handle, cfg *config.Config, lim limiter.Limiter, root dyngo.Operation) {
- if listener := newWafEventListener(wafHandle, cfg, lim); listener != nil {
- log.Debug("appsec: registering the GraphQL WAF Event Listener")
- dyngo.On(root, listener.onEvent)
- }
-}
+type Feature struct{}
-type wafEventListener struct {
- wafHandle *waf.Handle
- config *config.Config
- addresses listener.AddressSet
- limiter limiter.Limiter
- wafDiags waf.Diagnostics
- once sync.Once
+func (*Feature) String() string {
+ return "GraphQL Security"
}
-func newWafEventListener(wafHandle *waf.Handle, cfg *config.Config, limiter limiter.Limiter) *wafEventListener {
- if wafHandle == nil {
- log.Debug("appsec: no WAF Handle available, the GraphQL WAF Event Listener will not be registered")
- return nil
- }
-
- addresses := listener.FilterAddressSet(supportedAddresses, wafHandle)
- if len(addresses) == 0 {
- log.Debug("appsec: no supported GraphQL address is used by currently loaded WAF rules, the GraphQL WAF Event Listener will not be registered")
- return nil
- }
+func (*Feature) Stop() {}
- return &wafEventListener{
- wafHandle: wafHandle,
- config: cfg,
- addresses: addresses,
- limiter: limiter,
- wafDiags: wafHandle.Diagnostics(),
- }
+func (f *Feature) OnResolveField(op *graphqlsec.ResolveOperation, args graphqlsec.ResolveOperationArgs) {
+ dyngo.EmitData(op, waf.RunEvent{
+ Operation: op,
+ RunAddressData: addresses.NewAddressesBuilder().
+ WithGraphQLResolver(args.FieldName, args.Arguments).
+ Build(),
+ })
}
-// NewWAFEventListener returns the WAF event listener to register in order
-// to enable it.
-func (l *wafEventListener) onEvent(request *types.RequestOperation, _ types.RequestOperationArgs) {
- wafCtx, err := l.wafHandle.NewContextWithBudget(l.config.WAFTimeout)
- if err != nil {
- log.Debug("appsec: could not create budgeted WAF context: %v", err)
- }
- // Early return in the following cases:
- // - wafCtx is nil, meaning it was concurrently released
- // - err is not nil, meaning context creation failed
- if wafCtx == nil || err != nil {
- return
- }
-
- if _, ok := l.addresses[httpsec.ServerIoNetURLAddr]; ok {
- httpsec.RegisterRoundTripperListener(request, &request.SecurityEventsHolder, wafCtx, l.limiter)
+func NewGraphQLSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !config.SupportedAddresses.AnyOf(addresses.GraphQLServerResolverAddr) {
+ return nil, nil
}
- // Add span tags notifying this trace is AppSec-enabled
- trace.SetAppSecEnabledTags(request)
- l.once.Do(func() {
- shared.AddRulesMonitoringTags(request, &l.wafDiags)
- request.SetTag(ext.ManualKeep, samplernames.AppSec)
- })
-
- dyngo.On(request, func(query *types.ExecutionOperation, args types.ExecutionOperationArgs) {
- dyngo.On(query, func(field *types.ResolveOperation, args types.ResolveOperationArgs) {
- if _, found := l.addresses[graphQLServerResolverAddr]; found {
- wafResult := shared.RunWAF(
- wafCtx,
- waf.RunAddressData{
- Ephemeral: map[string]any{
- graphQLServerResolverAddr: map[string]any{args.FieldName: args.Arguments},
- },
- },
- )
- shared.AddSecurityEvents(&field.SecurityEventsHolder, l.limiter, wafResult.Events)
- }
+ feature := &Feature{}
+ dyngo.On(rootOp, feature.OnResolveField)
- dyngo.OnFinish(field, func(field *types.ResolveOperation, res types.ResolveOperationRes) {
- trace.SetEventSpanTags(field, field.Events())
- })
- })
-
- dyngo.OnFinish(query, func(query *types.ExecutionOperation, res types.ExecutionOperationRes) {
- trace.SetEventSpanTags(query, query.Events())
- })
- })
-
- dyngo.OnFinish(request, func(request *types.RequestOperation, res types.RequestOperationRes) {
- defer wafCtx.Close()
-
- shared.AddWAFMonitoringTags(request, l.wafDiags.Version, wafCtx.Stats().Metrics())
- trace.SetEventSpanTags(request, request.Events())
- })
+ return feature, nil
}
diff --git a/internal/appsec/listener/grpcsec/grpc.go b/internal/appsec/listener/grpcsec/grpc.go
index 861021e86f..52bb2c0fb7 100644
--- a/internal/appsec/listener/grpcsec/grpc.go
+++ b/internal/appsec/listener/grpcsec/grpc.go
@@ -1,209 +1,75 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
package grpcsec
import (
- "sync"
-
- "go.uber.org/atomic"
-
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/grpcsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec"
- shared "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
)
-// gRPC rule addresses currently supported by the WAF
-const (
- GRPCServerMethodAddr = "grpc.server.method"
- GRPCServerRequestMessageAddr = "grpc.server.request.message"
- GRPCServerRequestMetadataAddr = "grpc.server.request.metadata"
-)
+type Feature struct{}
-// List of gRPC rule addresses currently supported by the WAF
-var supportedAddresses = listener.AddressSet{
- GRPCServerMethodAddr: {},
- GRPCServerRequestMessageAddr: {},
- GRPCServerRequestMetadataAddr: {},
- httpsec.HTTPClientIPAddr: {},
- httpsec.UserIDAddr: {},
- httpsec.ServerIoNetURLAddr: {},
- ossec.ServerIOFSFileAddr: {},
- sqlsec.ServerDBStatementAddr: {},
- sqlsec.ServerDBTypeAddr: {},
+func (*Feature) String() string {
+ return "gRPC Security"
}
-// Install registers the gRPC WAF Event Listener on the given root operation.
-func Install(wafHandle *waf.Handle, cfg *config.Config, lim limiter.Limiter, root dyngo.Operation) {
- if listener := newWafEventListener(wafHandle, cfg, lim); listener != nil {
- log.Debug("appsec: registering the gRPC WAF Event Listener")
- dyngo.On(root, listener.onEvent)
+func (*Feature) Stop() {}
+
+func NewGRPCSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !config.SupportedAddresses.AnyOf(
+ addresses.ClientIPAddr,
+ addresses.GRPCServerMethodAddr,
+ addresses.GRPCServerRequestMessageAddr,
+ addresses.GRPCServerRequestMetadataAddr,
+ addresses.GRPCServerResponseMessageAddr,
+ addresses.GRPCServerResponseMetadataHeadersAddr,
+ addresses.GRPCServerResponseMetadataTrailersAddr,
+ addresses.GRPCServerResponseStatusCodeAddr) {
+ return nil, nil
}
-}
-type wafEventListener struct {
- wafHandle *waf.Handle
- config *config.Config
- addresses listener.AddressSet
- limiter limiter.Limiter
- wafDiags waf.Diagnostics
- once sync.Once
+ feature := &Feature{}
+ dyngo.On(rootOp, feature.OnStart)
+ dyngo.OnFinish(rootOp, feature.OnFinish)
+ return feature, nil
}
-func newWafEventListener(wafHandle *waf.Handle, cfg *config.Config, limiter limiter.Limiter) *wafEventListener {
- if wafHandle == nil {
- log.Debug("appsec: no WAF Handle available, the gRPC WAF Event Listener will not be registered")
- return nil
- }
+func (f *Feature) OnStart(op *grpcsec.HandlerOperation, args grpcsec.HandlerOperationArgs) {
+ ipTags, clientIP := httpsec.ClientIPTags(args.Metadata, false, args.RemoteAddr)
+ log.Debug("appsec: http client ip detection returned `%s`", clientIP)
- addresses := listener.FilterAddressSet(supportedAddresses, wafHandle)
- if len(addresses) == 0 {
- log.Debug("appsec: no supported gRPC address is used by currently loaded WAF rules, the gRPC WAF Event Listener will not be registered")
- return nil
- }
+ op.SetStringTags(ipTags)
- return &wafEventListener{
- wafHandle: wafHandle,
- config: cfg,
- addresses: addresses,
- limiter: limiter,
- wafDiags: wafHandle.Diagnostics(),
- }
-}
+ SetRequestMetadataTags(op, args.Metadata)
-// NewWAFEventListener returns the WAF event listener to register in order to enable it, listening to gRPC handler
-// events.
-func (l *wafEventListener) onEvent(op *types.HandlerOperation, handlerArgs types.HandlerOperationArgs) {
- // Limit the maximum number of security events, as a streaming RPC could
- // receive unlimited number of messages where we could find security events
- var (
- nbEvents atomic.Uint32
- logOnce sync.Once // per request
+ op.Run(op,
+ addresses.NewAddressesBuilder().
+ WithGRPCMethod(args.Method).
+ WithGRPCRequestMetadata(args.Metadata).
+ WithClientIP(clientIP).
+ Build(),
)
- addEvents := func(events []any) {
- const maxWAFEventsPerRequest = 10
- if nbEvents.Load() >= maxWAFEventsPerRequest {
- logOnce.Do(func() {
- log.Debug("appsec: ignoring new WAF event due to the maximum number of security events per grpc call reached")
- })
- return
- }
- nbEvents.Add(uint32(len(events)))
- shared.AddSecurityEvents(&op.SecurityEventsHolder, l.limiter, events)
- }
-
- wafCtx, err := l.wafHandle.NewContextWithBudget(l.config.WAFTimeout)
- if err != nil {
- log.Debug("appsec: could not create budgeted WAF context: %v", err)
- }
- // Early return in the following cases:
- // - wafCtx is nil, meaning it was concurrently released
- // - err is not nil, meaning context creation failed
- if wafCtx == nil || err != nil {
- return
- }
-
- if httpsec.SSRFAddressesPresent(l.addresses) {
- httpsec.RegisterRoundTripperListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter)
- }
-
- if ossec.OSAddressesPresent(l.addresses) {
- ossec.RegisterOpenListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter)
- }
-
- if sqlsec.SQLAddressesPresent(l.addresses) {
- sqlsec.RegisterSQLListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter)
- }
-
- // Listen to the UserID address if the WAF rules are using it
- if l.isSecAddressListened(httpsec.UserIDAddr) {
- // UserIDOperation happens when appsec.SetUser() is called. We run the WAF and apply actions to
- // see if the associated user should be blocked. Since we don't control the execution flow in this case
- // (SetUser is SDK), we delegate the responsibility of interrupting the handler to the user.
- dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args sharedsec.UserIDOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Persistent: map[string]any{httpsec.UserIDAddr: args.UserID}}
- }))
- }
-
- values := make(map[string]any, 2) // 2 because the method and client ip addresses are commonly present in the rules
- if l.isSecAddressListened(GRPCServerMethodAddr) {
- // Note that this address is passed asap for the passlist, which are created per grpc method
- values[GRPCServerMethodAddr] = handlerArgs.Method
- }
- if l.isSecAddressListened(httpsec.HTTPClientIPAddr) && handlerArgs.ClientIP.IsValid() {
- values[httpsec.HTTPClientIPAddr] = handlerArgs.ClientIP.String()
- }
-
- wafResult := shared.RunWAF(wafCtx, waf.RunAddressData{Persistent: values})
- if wafResult.HasEvents() {
- addEvents(wafResult.Events)
- log.Debug("appsec: WAF detected an attack before executing the request")
- }
- if wafResult.HasActions() {
- interrupt := shared.ProcessActions(op, wafResult.Actions)
- if interrupt {
- wafCtx.Close()
- return
- }
- }
-
- // When the gRPC handler receives a message
- dyngo.OnFinish(op, func(_ types.ReceiveOperation, res types.ReceiveOperationRes) {
- // Run the WAF on the rule addresses available and listened to by the sec rules
- var values waf.RunAddressData
- // Add the gRPC message to the values if the WAF rules are using it.
- // Note that it is an ephemeral address as they can happen more than once per RPC.
- if l.isSecAddressListened(GRPCServerRequestMessageAddr) {
- values.Ephemeral = map[string]any{GRPCServerRequestMessageAddr: res.Message}
- }
-
- // Add the metadata to the values if the WAF rules are using it.
- if l.isSecAddressListened(GRPCServerRequestMetadataAddr) {
- if md := handlerArgs.Metadata; len(md) > 0 {
- values.Persistent = map[string]any{GRPCServerRequestMetadataAddr: md}
- }
- }
-
- // Run the WAF, ignoring the returned actions - if any - since blocking after the request handler's
- // response is not supported at the moment.
- wafResult := shared.RunWAF(wafCtx, values)
- if wafResult.HasEvents() {
- log.Debug("appsec: attack detected by the grpc waf")
- addEvents(wafResult.Events)
- }
- if wafResult.HasActions() {
- shared.ProcessActions(op, wafResult.Actions)
- }
- })
-
- // When the gRPC handler finishes
- dyngo.OnFinish(op, func(op *types.HandlerOperation, _ types.HandlerOperationRes) {
- defer wafCtx.Close()
+}
- shared.AddWAFMonitoringTags(op, l.wafDiags.Version, wafCtx.Stats().Metrics())
- // Log the following metrics once per instantiation of a WAF handle
- l.once.Do(func() {
- shared.AddRulesMonitoringTags(op, &l.wafDiags)
- op.SetTag(ext.ManualKeep, samplernames.AppSec)
- })
- })
+func (f *Feature) OnFinish(op *grpcsec.HandlerOperation, res grpcsec.HandlerOperationRes) {
+ op.Run(op,
+ addresses.NewAddressesBuilder().
+ WithGRPCResponseStatusCode(res.StatusCode).
+ Build(),
+ )
}
-func (l *wafEventListener) isSecAddressListened(addr string) bool {
- _, listened := l.addresses[addr]
- return listened
+func SetRequestMetadataTags(span trace.TagSetter, metadata map[string][]string) {
+ for h, v := range httpsec.NormalizeHTTPHeaders(metadata) {
+ span.SetTag("grpc.metadata."+h, v)
+ }
}
diff --git a/internal/appsec/trace/grpctrace/grpc_test.go b/internal/appsec/listener/grpcsec/grpc_test.go
similarity index 74%
rename from internal/appsec/trace/grpctrace/grpc_test.go
rename to internal/appsec/listener/grpcsec/grpc_test.go
index 6536fabda2..bfb8520b5c 100644
--- a/internal/appsec/trace/grpctrace/grpc_test.go
+++ b/internal/appsec/listener/grpcsec/grpc_test.go
@@ -1,19 +1,38 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
-package grpctrace
+package grpcsec
import (
"fmt"
"testing"
- testlib "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/_testlib"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
"github.com/stretchr/testify/require"
)
+type MockSpan struct {
+ Tags map[string]any
+}
+
+func (m *MockSpan) SetTag(key string, value interface{}) {
+ if m.Tags == nil {
+ m.Tags = make(map[string]any)
+ }
+ if key == ext.ManualKeep {
+ if value == samplernames.AppSec {
+ m.Tags[ext.ManualKeep] = true
+ }
+ } else {
+ m.Tags[key] = value
+ }
+}
+
func TestTags(t *testing.T) {
for _, eventCase := range []struct {
name string
@@ -74,8 +93,8 @@ func TestTags(t *testing.T) {
} {
metadataCase := metadataCase
t.Run(fmt.Sprintf("%s-%s", eventCase.name, metadataCase.name), func(t *testing.T) {
- var span testlib.MockSpan
- err := setSecurityEventsTags(&span, eventCase.events)
+ var span MockSpan
+ err := waf.SetEventSpanTags(&span, eventCase.events)
if eventCase.expectedError {
require.Error(t, err)
return
@@ -84,7 +103,7 @@ func TestTags(t *testing.T) {
SetRequestMetadataTags(&span, metadataCase.md)
if eventCase.events != nil {
- testlib.RequireContainsMapSubset(t, span.Tags, map[string]interface{}{
+ require.Subset(t, span.Tags, map[string]interface{}{
"_dd.appsec.json": eventCase.expectedTag,
"manual.keep": true,
"appsec.event": true,
@@ -93,10 +112,8 @@ func TestTags(t *testing.T) {
}
if l := len(metadataCase.expectedTags); l > 0 {
- testlib.RequireContainsMapSubset(t, span.Tags, metadataCase.expectedTags)
+ require.Subset(t, span.Tags, metadataCase.expectedTags)
}
-
- require.False(t, span.Finished)
})
}
}
diff --git a/internal/appsec/listener/httpsec/http.go b/internal/appsec/listener/httpsec/http.go
index c458931336..08b9e853dd 100644
--- a/internal/appsec/listener/httpsec/http.go
+++ b/internal/appsec/listener/httpsec/http.go
@@ -1,233 +1,96 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
package httpsec
import (
- "fmt"
"math/rand"
- "sync"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "github.com/DataDog/appsec-internal-go/appsec"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
+
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/ossec"
- shared "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sqlsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
-)
-
-// HTTP rule addresses currently supported by the WAF
-const (
- ServerRequestMethodAddr = "server.request.method"
- ServerRequestRawURIAddr = "server.request.uri.raw"
- ServerRequestHeadersNoCookiesAddr = "server.request.headers.no_cookies"
- ServerRequestCookiesAddr = "server.request.cookies"
- ServerRequestQueryAddr = "server.request.query"
- ServerRequestPathParamsAddr = "server.request.path_params"
- ServerRequestBodyAddr = "server.request.body"
- ServerResponseStatusAddr = "server.response.status"
- ServerResponseHeadersNoCookiesAddr = "server.response.headers.no_cookies"
- HTTPClientIPAddr = "http.client_ip"
- UserIDAddr = "usr.id"
- ServerIoNetURLAddr = "server.io.net.url"
)
-// List of HTTP rule addresses currently supported by the WAF
-var supportedAddresses = listener.AddressSet{
- ServerRequestMethodAddr: {},
- ServerRequestRawURIAddr: {},
- ServerRequestHeadersNoCookiesAddr: {},
- ServerRequestCookiesAddr: {},
- ServerRequestQueryAddr: {},
- ServerRequestPathParamsAddr: {},
- ServerRequestBodyAddr: {},
- ServerResponseStatusAddr: {},
- ServerResponseHeadersNoCookiesAddr: {},
- HTTPClientIPAddr: {},
- UserIDAddr: {},
- ServerIoNetURLAddr: {},
- ossec.ServerIOFSFileAddr: {},
- sqlsec.ServerDBStatementAddr: {},
- sqlsec.ServerDBTypeAddr: {},
-}
-
-// Install registers the HTTP WAF Event Listener on the given root operation.
-func Install(wafHandle *waf.Handle, cfg *config.Config, lim limiter.Limiter, root dyngo.Operation) {
- if listener := newWafEventListener(wafHandle, cfg, lim); listener != nil {
- log.Debug("appsec: registering the HTTP WAF Event Listener")
- dyngo.On(root, listener.onEvent)
- }
+type Feature struct {
+ APISec appsec.APISecConfig
}
-type wafEventListener struct {
- wafHandle *waf.Handle
- config *config.Config
- addresses listener.AddressSet
- limiter limiter.Limiter
- wafDiags waf.Diagnostics
- once sync.Once
+func (*Feature) String() string {
+ return "HTTP Security"
}
-// newWAFEventListener returns the WAF event listener to register in order to enable it.
-func newWafEventListener(wafHandle *waf.Handle, cfg *config.Config, limiter limiter.Limiter) *wafEventListener {
- if wafHandle == nil {
- log.Debug("appsec: no WAF Handle available, the HTTP WAF Event Listener will not be registered")
- return nil
+func (*Feature) Stop() {}
+
+func NewHTTPSecFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !config.SupportedAddresses.AnyOf(addresses.ServerRequestMethodAddr,
+ addresses.ServerRequestRawURIAddr,
+ addresses.ServerRequestHeadersNoCookiesAddr,
+ addresses.ServerRequestCookiesAddr,
+ addresses.ServerRequestQueryAddr,
+ addresses.ServerRequestPathParamsAddr,
+ addresses.ServerRequestBodyAddr,
+ addresses.ServerResponseStatusAddr,
+ addresses.ServerResponseHeadersNoCookiesAddr) {
+ return nil, nil
}
- addresses := listener.FilterAddressSet(supportedAddresses, wafHandle)
- if len(addresses) == 0 {
- log.Debug("appsec: no supported HTTP address is used by currently loaded WAF rules, the HTTP WAF Event Listener will not be registered")
- return nil
+ feature := &Feature{
+ APISec: config.APISec,
}
- return &wafEventListener{
- wafHandle: wafHandle,
- config: cfg,
- addresses: addresses,
- limiter: limiter,
- wafDiags: wafHandle.Diagnostics(),
- }
+ dyngo.On(rootOp, feature.OnRequest)
+ dyngo.OnFinish(rootOp, feature.OnResponse)
+ return feature, nil
}
-func (l *wafEventListener) onEvent(op *types.Operation, args types.HandlerOperationArgs) {
- wafCtx, err := l.wafHandle.NewContextWithBudget(l.config.WAFTimeout)
- if err != nil {
- log.Debug("appsec: could not create budgeted WAF context: %v", err)
- }
- // Early return in the following cases:
- // - wafCtx is nil, meaning it was concurrently released
- // - err is not nil, meaning context creation failed
- if wafCtx == nil || err != nil {
- // The WAF event listener got concurrently released
- return
- }
-
- if SSRFAddressesPresent(l.addresses) {
- dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args types.RoundTripOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Ephemeral: map[string]any{ServerIoNetURLAddr: args.URL}}
- }))
- }
-
- if ossec.OSAddressesPresent(l.addresses) {
- ossec.RegisterOpenListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter)
- }
-
- if sqlsec.SQLAddressesPresent(l.addresses) {
- sqlsec.RegisterSQLListener(op, &op.SecurityEventsHolder, wafCtx, l.limiter)
- }
-
- if _, ok := l.addresses[UserIDAddr]; ok {
- // OnUserIDOperationStart happens when appsec.SetUser() is called. We run the WAF and apply actions to
- // see if the associated user should be blocked. Since we don't control the execution flow in this case
- // (SetUser is SDK), we delegate the responsibility of interrupting the handler to the user.
- dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args sharedsec.UserIDOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Persistent: map[string]any{UserIDAddr: args.UserID}}
- }))
- }
+func (feature *Feature) OnRequest(op *httpsec.HandlerOperation, args httpsec.HandlerOperationArgs) {
+ tags, ip := ClientIPTags(args.Headers, true, args.RemoteAddr)
+ log.Debug("appsec: http client ip detection returned `%s` given the http headers `%v`", ip, args.Headers)
+
+ op.SetStringTags(tags)
+ headers := headersRemoveCookies(args.Headers)
+ headers["host"] = []string{args.Host}
+
+ setRequestHeadersTags(op, headers)
+
+ op.Run(op,
+ addresses.NewAddressesBuilder().
+ WithMethod(args.Method).
+ WithRawURI(args.RequestURI).
+ WithHeadersNoCookies(headers).
+ WithCookies(args.Cookies).
+ WithQuery(args.QueryParams).
+ WithPathParams(args.PathParams).
+ WithClientIP(ip).
+ Build(),
+ )
+}
- values := make(map[string]any, 8)
- for addr := range l.addresses {
- switch addr {
- case HTTPClientIPAddr:
- if args.ClientIP.IsValid() {
- values[HTTPClientIPAddr] = args.ClientIP.String()
- }
- case ServerRequestMethodAddr:
- values[ServerRequestMethodAddr] = args.Method
- case ServerRequestRawURIAddr:
- values[ServerRequestRawURIAddr] = args.RequestURI
- case ServerRequestHeadersNoCookiesAddr:
- if headers := args.Headers; headers != nil {
- values[ServerRequestHeadersNoCookiesAddr] = headers
- }
- case ServerRequestCookiesAddr:
- if cookies := args.Cookies; cookies != nil {
- values[ServerRequestCookiesAddr] = cookies
- }
- case ServerRequestQueryAddr:
- if query := args.Query; query != nil {
- values[ServerRequestQueryAddr] = query
- }
- case ServerRequestPathParamsAddr:
- if pathParams := args.PathParams; pathParams != nil {
- values[ServerRequestPathParamsAddr] = pathParams
- }
- }
- }
+func (feature *Feature) OnResponse(op *httpsec.HandlerOperation, resp httpsec.HandlerOperationRes) {
+ headers := headersRemoveCookies(resp.Headers)
+ setResponseHeadersTags(op, headers)
- wafResult := shared.RunWAF(wafCtx, waf.RunAddressData{Persistent: values})
- if wafResult.HasActions() || wafResult.HasEvents() {
- interrupt := shared.ProcessActions(op, wafResult.Actions)
- shared.AddSecurityEvents(&op.SecurityEventsHolder, l.limiter, wafResult.Events)
- log.Debug("appsec: WAF detected an attack before executing the request")
- if interrupt {
- wafCtx.Close()
- return
- }
- }
+ builder := addresses.NewAddressesBuilder().
+ WithResponseHeadersNoCookies(headers).
+ WithResponseStatus(resp.StatusCode)
- if _, ok := l.addresses[ServerRequestBodyAddr]; ok {
- dyngo.On(op, shared.MakeWAFRunListener(&op.SecurityEventsHolder, wafCtx, l.limiter, func(args types.SDKBodyOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Persistent: map[string]any{ServerRequestBodyAddr: args.Body}}
- }))
+ if feature.canExtractSchemas() {
+ builder = builder.ExtractSchema()
}
- dyngo.OnFinish(op, func(op *types.Operation, res types.HandlerOperationRes) {
- defer wafCtx.Close()
-
- values = make(map[string]any, 3)
- if l.canExtractSchemas() {
- // This address will be passed as persistent. The WAF will keep it in store and trigger schema extraction
- // for each run.
- values["waf.context.processor"] = map[string]any{"extract-schema": true}
- }
-
- if _, ok := l.addresses[ServerResponseStatusAddr]; ok {
- // serverResponseStatusAddr is a string address, so we must format the status code...
- values[ServerResponseStatusAddr] = fmt.Sprintf("%d", res.Status)
- }
-
- if _, ok := l.addresses[ServerResponseHeadersNoCookiesAddr]; ok && res.Headers != nil {
- values[ServerResponseHeadersNoCookiesAddr] = res.Headers
- }
-
- // Run the WAF, ignoring the returned actions - if any - since blocking after the request handler's
- // response is not supported at the moment.
- wafResult := shared.RunWAF(wafCtx, waf.RunAddressData{Persistent: values})
-
- // Add WAF metrics.
- shared.AddWAFMonitoringTags(op, l.wafDiags.Version, wafCtx.Stats().Metrics())
-
- // Add the following metrics once per instantiation of a WAF handle
- l.once.Do(func() {
- shared.AddRulesMonitoringTags(op, &l.wafDiags)
- op.SetTag(ext.ManualKeep, samplernames.AppSec)
- })
-
- // Log the attacks if any
- if wafResult.HasEvents() {
- log.Debug("appsec: attack detected by the waf")
- shared.AddSecurityEvents(&op.SecurityEventsHolder, l.limiter, wafResult.Events)
- }
- for tag, value := range wafResult.Derivatives {
- op.AddSerializableTag(tag, value)
- }
- })
+ op.Run(op, builder.Build())
}
// canExtractSchemas checks that API Security is enabled and that sampling rate
// allows extracting schemas
-func (l *wafEventListener) canExtractSchemas() bool {
- return l.config.APISec.Enabled && l.config.APISec.SampleRate >= rand.Float64()
+func (feature *Feature) canExtractSchemas() bool {
+ return feature.APISec.Enabled && feature.APISec.SampleRate >= rand.Float64()
}
diff --git a/internal/appsec/trace/httptrace/http.go b/internal/appsec/listener/httpsec/request.go
similarity index 71%
rename from internal/appsec/trace/httptrace/http.go
rename to internal/appsec/listener/httpsec/request.go
index 25ed275ecf..abd3983183 100644
--- a/internal/appsec/trace/httptrace/http.go
+++ b/internal/appsec/listener/httpsec/request.go
@@ -1,19 +1,19 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
-package httptrace
+package httpsec
import (
+ "net/http"
"net/netip"
"os"
"strings"
"github.com/DataDog/appsec-internal-go/httpsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
)
const (
@@ -23,7 +23,7 @@ const (
var (
// defaultIPHeaders is the default list of IP-related headers leveraged to
- // retrieve the public client IP address in ClientIP.
+ // retrieve the public client IP address in httpsec.ClientIP.
defaultIPHeaders = []string{
"x-forwarded-for",
"x-real-ip",
@@ -34,7 +34,7 @@ var (
"x-cluster-client-ip",
"fastly-client-ip",
"cf-connecting-ip",
- "cf-connecting-ip6",
+ "cf-connecting-ipv6",
}
// defaultCollectedHeaders is the default list of HTTP headers collected as
@@ -67,7 +67,7 @@ var (
collectedHeadersLookupMap map[string]struct{}
// monitoredClientIPHeadersCfg is the list of IP-related headers leveraged to
- // retrieve the public client IP address in ClientIP. This is defined at init
+ // retrieve the public client IP address in httpsec.ClientIP. This is defined at init
// time in function of the value of the envClientIPHeader environment variable.
monitoredClientIPHeadersCfg []string
)
@@ -75,7 +75,7 @@ var (
// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
// containing the client IP and `network.client.ip` containing the remote IP.
// The tags are present only if a valid ip address has been returned by
-// ClientIP().
+// httpsec.ClientIP().
func ClientIPTags(headers map[string][]string, hasCanonicalHeaders bool, remoteAddr string) (tags map[string]string, clientIP netip.Addr) {
remoteIP, clientIP := httpsec.ClientIP(headers, hasCanonicalHeaders, remoteAddr, monitoredClientIPHeadersCfg)
tags = httpsec.ClientIPTags(remoteIP, clientIP)
@@ -101,6 +101,20 @@ func NormalizeHTTPHeaders(headers map[string][]string) (normalized map[string]st
return normalized
}
+// Remove cookies from the request headers and return the map of headers
+// Used for the `server.request.headers.no_cookies` and `server.response.headers.no_cookies` addresses for the WAF
+func headersRemoveCookies(headers http.Header) map[string][]string {
+ headersNoCookies := make(http.Header, len(headers))
+ for k, v := range headers {
+ k := strings.ToLower(k)
+ if k == "cookie" {
+ continue
+ }
+ headersNoCookies[k] = v
+ }
+ return headersNoCookies
+}
+
func normalizeHTTPHeaderName(name string) string {
return strings.ToLower(name)
}
@@ -109,13 +123,6 @@ func normalizeHTTPHeaderValue(values []string) string {
return strings.Join(values, ",")
}
-// SetSecurityEventsTags sets the AppSec-specific span tags when a security event occurred into the service entry span.
-func SetSecurityEventsTags(span trace.TagSetter, events []any) {
- if err := trace.SetEventSpanTags(span, events); err != nil {
- log.Error("appsec: unexpected error while creating the appsec events tags: %v", err)
- }
-}
-
func init() {
makeCollectedHTTPHeadersLookupMap()
readMonitoredClientIPHeadersConfig()
@@ -130,7 +137,7 @@ func makeCollectedHTTPHeadersLookupMap() {
func readMonitoredClientIPHeadersConfig() {
if header := os.Getenv(envClientIPHeader); header != "" {
- // Make this header the only one to consider in ClientIP
+ // Make this header the only one to consider in httpsec.ClientIP
monitoredClientIPHeadersCfg = []string{header}
// Add this header to the list of collected headers
@@ -141,3 +148,20 @@ func readMonitoredClientIPHeadersConfig() {
monitoredClientIPHeadersCfg = defaultIPHeaders
}
}
+
+// setRequestHeadersTags sets the AppSec-specific request headers span tags.
+func setRequestHeadersTags(span trace.TagSetter, headers map[string][]string) {
+ setHeadersTags(span, "http.request.headers.", headers)
+}
+
+// setResponseHeadersTags sets the AppSec-specific response headers span tags.
+func setResponseHeadersTags(span trace.TagSetter, headers map[string][]string) {
+ setHeadersTags(span, "http.response.headers.", headers)
+}
+
+// setHeadersTags sets the AppSec-specific headers span tags.
+func setHeadersTags(span trace.TagSetter, tagPrefix string, headers map[string][]string) {
+ for h, v := range NormalizeHTTPHeaders(headers) {
+ span.SetTag(tagPrefix+h, v)
+ }
+}
diff --git a/internal/appsec/listener/httpsec/request_test.go b/internal/appsec/listener/httpsec/request_test.go
new file mode 100644
index 0000000000..38052cbb96
--- /dev/null
+++ b/internal/appsec/listener/httpsec/request_test.go
@@ -0,0 +1,241 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package httpsec
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/metadata"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+func TestClientIP(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ addr net.Addr
+ md metadata.MD
+ expectedClientIP string
+ }{
+ {
+ name: "tcp-ipv4-address",
+ addr: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
+ expectedClientIP: "1.2.3.4",
+ },
+ {
+ name: "tcp-ipv4-address",
+ addr: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
+ md: map[string][]string{"x-client-ip": {"127.0.0.1, 2.3.4.5"}},
+ expectedClientIP: "2.3.4.5",
+ },
+ {
+ name: "tcp-ipv6-address",
+ addr: &net.TCPAddr{IP: net.ParseIP("::1"), Port: 6789},
+ expectedClientIP: "::1",
+ },
+ {
+ name: "udp-ipv4-address",
+ addr: &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
+ expectedClientIP: "1.2.3.4",
+ },
+ {
+ name: "udp-ipv6-address",
+ addr: &net.UDPAddr{IP: net.ParseIP("::1"), Port: 6789},
+ expectedClientIP: "::1",
+ },
+ {
+ name: "unix-socket-address",
+ addr: &net.UnixAddr{Name: "/var/my.sock"},
+ },
+ } {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ _, clientIP := ClientIPTags(tc.md, false, tc.addr.String())
+ expectedClientIP, _ := netip.ParseAddr(tc.expectedClientIP)
+ require.Equal(t, expectedClientIP.String(), clientIP.String())
+ })
+ }
+}
+
+func TestNormalizeHTTPHeaders(t *testing.T) {
+ for _, tc := range []struct {
+ headers map[string][]string
+ expected map[string]string
+ }{
+ {
+ headers: nil,
+ expected: nil,
+ },
+ {
+ headers: map[string][]string{
+ "cookie": {"not-collected"},
+ },
+ expected: nil,
+ },
+ {
+ headers: map[string][]string{
+ "cookie": {"not-collected"},
+ "x-forwarded-for": {"1.2.3.4,5.6.7.8"},
+ },
+ expected: map[string]string{
+ "x-forwarded-for": "1.2.3.4,5.6.7.8",
+ },
+ },
+ {
+ headers: map[string][]string{
+ "cookie": {"not-collected"},
+ "x-forwarded-for": {"1.2.3.4,5.6.7.8", "9.10.11.12,13.14.15.16"},
+ },
+ expected: map[string]string{
+ "x-forwarded-for": "1.2.3.4,5.6.7.8,9.10.11.12,13.14.15.16",
+ },
+ },
+ } {
+ headers := NormalizeHTTPHeaders(tc.headers)
+ require.Equal(t, tc.expected, headers)
+ }
+}
+
+type MockSpan struct {
+ Tags map[string]any
+}
+
+func (m *MockSpan) SetTag(key string, value interface{}) {
+ if m.Tags == nil {
+ m.Tags = make(map[string]any)
+ }
+ if key == ext.ManualKeep {
+ if value == samplernames.AppSec {
+ m.Tags[ext.ManualKeep] = true
+ }
+ } else {
+ m.Tags[key] = value
+ }
+}
+
+func TestTags(t *testing.T) {
+ for _, eventCase := range []struct {
+ name string
+ events []any
+ expectedTag string
+ expectedError bool
+ }{
+ {
+ name: "no-event",
+ events: nil,
+ },
+ {
+ name: "one-event",
+ events: []any{"one"},
+ expectedTag: `{"triggers":["one"]}`,
+ },
+ {
+ name: "two-events",
+ events: []any{"one", "two"},
+ expectedTag: `{"triggers":["one","two"]}`,
+ },
+ } {
+ eventCase := eventCase
+ for _, reqHeadersCase := range []struct {
+ name string
+ headers map[string][]string
+ expectedTags map[string]interface{}
+ }{
+ {
+ name: "zero-headers",
+ },
+ {
+ name: "xff-header",
+ headers: map[string][]string{
+ "X-Forwarded-For": {"1.2.3.4", "4.5.6.7"},
+ "my-header": {"something"},
+ },
+ expectedTags: map[string]interface{}{
+ "http.request.headers.x-forwarded-for": "1.2.3.4,4.5.6.7",
+ },
+ },
+ {
+ name: "xff-header",
+ headers: map[string][]string{
+ "X-Forwarded-For": {"1.2.3.4"},
+ "my-header": {"something"},
+ },
+ expectedTags: map[string]interface{}{
+ "http.request.headers.x-forwarded-for": "1.2.3.4",
+ },
+ },
+ {
+ name: "no-monitored-headers",
+ headers: map[string][]string{
+ "my-header": {"something"},
+ },
+ },
+ } {
+ reqHeadersCase := reqHeadersCase
+ for _, respHeadersCase := range []struct {
+ name string
+ headers map[string][]string
+ expectedTags map[string]interface{}
+ }{
+ {
+ name: "zero-headers",
+ },
+ {
+ name: "ct-header",
+ headers: map[string][]string{
+ "Content-Type": {"application/json"},
+ "my-header": {"something"},
+ },
+ expectedTags: map[string]interface{}{
+ "http.response.headers.content-type": "application/json",
+ },
+ },
+ {
+ name: "no-monitored-headers",
+ headers: map[string][]string{
+ "my-header": {"something"},
+ },
+ },
+ } {
+ respHeadersCase := respHeadersCase
+ t.Run(fmt.Sprintf("%s-%s-%s", eventCase.name, reqHeadersCase.name, respHeadersCase.name), func(t *testing.T) {
+ var span MockSpan
+ err := waf.SetEventSpanTags(&span, eventCase.events)
+ if eventCase.expectedError {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ setRequestHeadersTags(&span, reqHeadersCase.headers)
+ setResponseHeadersTags(&span, respHeadersCase.headers)
+
+ if eventCase.events != nil {
+ require.Subset(t, span.Tags, map[string]interface{}{
+ "_dd.appsec.json": eventCase.expectedTag,
+ "manual.keep": true,
+ "appsec.event": true,
+ "_dd.origin": "appsec",
+ })
+ }
+
+ if l := len(reqHeadersCase.expectedTags); l > 0 {
+ require.Subset(t, span.Tags, reqHeadersCase.expectedTags)
+ }
+
+ if l := len(respHeadersCase.expectedTags); l > 0 {
+ require.Subset(t, span.Tags, respHeadersCase.expectedTags)
+ }
+ })
+ }
+ }
+ }
+}
diff --git a/internal/appsec/listener/httpsec/roundtripper.go b/internal/appsec/listener/httpsec/roundtripper.go
index 0d5102466a..b72e8e8329 100644
--- a/internal/appsec/listener/httpsec/roundtripper.go
+++ b/internal/appsec/listener/httpsec/roundtripper.go
@@ -6,24 +6,35 @@
package httpsec
import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec/types"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/httpsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- "github.com/DataDog/go-libddwaf/v3"
)
-// RegisterRoundTripperListener registers a listener on outgoing HTTP client requests to run the WAF.
-func RegisterRoundTripperListener(op dyngo.Operation, events *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) {
- dyngo.On(op, sharedsec.MakeWAFRunListener(events, wafCtx, limiter, func(args types.RoundTripOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Ephemeral: map[string]any{ServerIoNetURLAddr: args.URL}}
- }))
+type SSRFProtectionFeature struct{}
+
+func (*SSRFProtectionFeature) String() string {
+ return "SSRF Protection"
+}
+
+func (*SSRFProtectionFeature) Stop() {}
+
+func NewSSRFProtectionFeature(config *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !config.RASP || !config.SupportedAddresses.AnyOf(addresses.ServerIoNetURLAddr) {
+ return nil, nil
+ }
+
+ feature := &SSRFProtectionFeature{}
+ dyngo.On(rootOp, feature.OnStart)
+ return feature, nil
}
-func SSRFAddressesPresent(addresses listener.AddressSet) bool {
- _, urlAddr := addresses[ServerIoNetURLAddr]
- return urlAddr
+func (*SSRFProtectionFeature) OnStart(op *httpsec.RoundTripOperation, args httpsec.RoundTripOperationArgs) {
+ dyngo.EmitData(op, waf.RunEvent{
+ Operation: op,
+ RunAddressData: addresses.NewAddressesBuilder().WithURL(args.URL).Build(),
+ })
}
diff --git a/internal/appsec/listener/listener.go b/internal/appsec/listener/listener.go
deleted file mode 100644
index 7435b29e1e..0000000000
--- a/internal/appsec/listener/listener.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-// Package listener provides functions and types used to listen to AppSec
-// instrumentation events produced by code usintrumented using the functions and
-// types found in gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter.
-package listener
-
-import waf "github.com/DataDog/go-libddwaf/v3"
-
-// AddressSet is a set of WAF addresses.
-type AddressSet map[string]struct{}
-
-// FilterAddressSet filters the supplied `supported` address set to only include
-// entries referenced by the supplied waf.Handle.
-func FilterAddressSet(supported AddressSet, handle *waf.Handle) AddressSet {
- result := make(AddressSet, len(supported))
-
- for _, addr := range handle.Addresses() {
- if _, found := supported[addr]; found {
- result[addr] = struct{}{}
- }
- }
-
- return result
-}
diff --git a/internal/appsec/listener/ossec/lfi.go b/internal/appsec/listener/ossec/lfi.go
index 12b32e1bf8..3161651147 100644
--- a/internal/appsec/listener/ossec/lfi.go
+++ b/internal/appsec/listener/ossec/lfi.go
@@ -8,39 +8,46 @@ package ossec
import (
"os"
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
-
"gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
)
-const (
- ServerIOFSFileAddr = "server.io.fs.file"
-)
+type Feature struct{}
-func RegisterOpenListener(op dyngo.Operation, eventsHolder *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) {
- runWAF := sharedsec.MakeWAFRunListener(eventsHolder, wafCtx, limiter, func(args ossec.OpenOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Ephemeral: map[string]any{ServerIOFSFileAddr: args.Path}}
- })
+func (*Feature) String() string {
+ return "LFI Protection"
+}
+
+func (*Feature) Stop() {}
- dyngo.On(op, func(op *ossec.OpenOperation, args ossec.OpenOperationArgs) {
- dyngo.OnData(op, func(e *events.BlockingSecurityEvent) {
- dyngo.OnFinish(op, func(_ *ossec.OpenOperation, res ossec.OpenOperationRes[*os.File]) {
- if res.Err != nil {
- *res.Err = e
- }
- })
+func NewOSSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerIOFSFileAddr) {
+ return nil, nil
+ }
+
+ feature := &Feature{}
+ dyngo.On(rootOp, feature.OnStart)
+ return feature, nil
+}
+
+func (*Feature) OnStart(op *ossec.OpenOperation, args ossec.OpenOperationArgs) {
+ dyngo.OnData(op, func(err *events.BlockingSecurityEvent) {
+ dyngo.OnFinish(op, func(_ *ossec.OpenOperation, res ossec.OpenOperationRes[*os.File]) {
+ if res.Err != nil {
+ *res.Err = err
+ }
})
- runWAF(op, args)
})
-}
-func OSAddressesPresent(addresses listener.AddressSet) bool {
- _, fileAddr := addresses[ServerIOFSFileAddr]
- return fileAddr
+ dyngo.EmitData(op, waf.RunEvent{
+ Operation: op,
+ RunAddressData: addresses.NewAddressesBuilder().
+ WithFilePath(args.Path).
+ Build(),
+ })
}
diff --git a/internal/appsec/listener/sharedsec/shared.go b/internal/appsec/listener/sharedsec/shared.go
deleted file mode 100644
index 096a77b84c..0000000000
--- a/internal/appsec/listener/sharedsec/shared.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package sharedsec
-
-import (
- "encoding/json"
- "errors"
-
- "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
- wafErrors "github.com/DataDog/go-libddwaf/v3/errors"
-)
-
-const (
- eventRulesVersionTag = "_dd.appsec.event_rules.version"
- eventRulesErrorsTag = "_dd.appsec.event_rules.errors"
- eventRulesLoadedTag = "_dd.appsec.event_rules.loaded"
- eventRulesFailedTag = "_dd.appsec.event_rules.error_count"
- wafVersionTag = "_dd.appsec.waf.version"
-)
-
-func RunWAF(wafCtx *waf.Context, values waf.RunAddressData) waf.Result {
- result, err := wafCtx.Run(values)
- if errors.Is(err, wafErrors.ErrTimeout) {
- log.Debug("appsec: waf timeout value reached: %v", err)
- } else if err != nil {
- log.Error("appsec: unexpected waf error: %v", err)
- }
- return result
-}
-
-func MakeWAFRunListener[O dyngo.Operation, T dyngo.ArgOf[O]](
- events *trace.SecurityEventsHolder,
- wafCtx *waf.Context,
- limiter limiter.Limiter,
- toRunAddressData func(T) waf.RunAddressData,
-) func(O, T) {
- return func(op O, args T) {
- wafResult := RunWAF(wafCtx, toRunAddressData(args))
- if !wafResult.HasEvents() {
- return
- }
-
- log.Debug("appsec: WAF detected a suspicious WAF event")
-
- ProcessActions(op, wafResult.Actions)
- AddSecurityEvents(events, limiter, wafResult.Events)
- }
-}
-
-// AddSecurityEvents is a helper function to add sec events to an operation taking into account the rate limiter.
-func AddSecurityEvents(holder *trace.SecurityEventsHolder, limiter limiter.Limiter, matches []any) {
- if len(matches) > 0 && limiter.Allow() {
- holder.AddSecurityEvents(matches)
- }
-}
-
-// AddRulesMonitoringTags adds the tags related to security rules monitoring
-func AddRulesMonitoringTags(th trace.TagSetter, wafDiags *waf.Diagnostics) {
- rInfo := wafDiags.Rules
- if rInfo == nil {
- return
- }
-
- if len(rInfo.Errors) == 0 {
- rInfo.Errors = nil
- }
- rulesetErrors, err := json.Marshal(wafDiags.Rules.Errors)
- if err != nil {
- log.Error("appsec: could not marshal the waf ruleset info errors to json")
- }
- th.SetTag(eventRulesErrorsTag, string(rulesetErrors)) // avoid the tracer's call to fmt.Sprintf on the value
- th.SetTag(eventRulesLoadedTag, len(rInfo.Loaded))
- th.SetTag(eventRulesFailedTag, len(rInfo.Failed))
- th.SetTag(wafVersionTag, waf.Version())
-}
-
-// AddWAFMonitoringTags adds the tags related to the monitoring of the WAF
-func AddWAFMonitoringTags(th trace.TagSetter, rulesVersion string, stats map[string]any) {
- // Rules version is set for every request to help the backend associate WAF duration metrics with rule version
- th.SetTag(eventRulesVersionTag, rulesVersion)
-
- // Report the stats sent by the WAF
- for k, v := range stats {
- th.SetTag(k, v)
- }
-}
-
-// ProcessActions sends the relevant actions to the operation's data listener.
-// It returns true if at least one of those actions require interrupting the request handler
-// When SDKError is not nil, this error is sent to the op with EmitData so that the invoked SDK can return it
-func ProcessActions(op dyngo.Operation, actions map[string]any) (interrupt bool) {
- for aType, params := range actions {
- log.Debug("appsec: processing %s action with params %v", aType, params)
- actionArray := ActionsFromEntry(aType, params)
- if actionArray == nil {
- log.Debug("cannot process %s action with params %v", aType, params)
- continue
- }
- for _, a := range actionArray {
- a.EmitData(op)
- interrupt = interrupt || a.Blocking()
- }
- }
-
- // If any of the actions are supposed to interrupt the request, emit a blocking event for the SDK operations
- // to return an error.
- if interrupt {
- dyngo.EmitData(op, &events.BlockingSecurityEvent{})
- }
-
- return interrupt
-}
-
-// ActionsFromEntry returns one or several actions generated from the WAF returned action entry
-// Several actions are returned when the action is of block_request type since we could be blocking HTTP or GRPC
-func ActionsFromEntry(actionType string, params any) []sharedsec.Action {
- p, ok := params.(map[string]any)
- if !ok {
- return nil
- }
- switch actionType {
- case "block_request":
- return sharedsec.NewBlockAction(p)
- case "redirect_request":
- return []sharedsec.Action{sharedsec.NewRedirectAction(p)}
- case "generate_stack":
- return []sharedsec.Action{sharedsec.NewStackTraceAction(p)}
-
- default:
- log.Debug("appsec: unknown action type `%s`", actionType)
- return nil
- }
-}
diff --git a/internal/appsec/listener/sqlsec/sql.go b/internal/appsec/listener/sqlsec/sql.go
index 6e8f046f53..3a6fbc0769 100644
--- a/internal/appsec/listener/sqlsec/sql.go
+++ b/internal/appsec/listener/sqlsec/sql.go
@@ -6,31 +6,38 @@
package sqlsec
import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec/types"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/sqlsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
-
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
)
-const (
- ServerDBStatementAddr = "server.db.statement"
- ServerDBTypeAddr = "server.db.system"
-)
+type Feature struct{}
-func RegisterSQLListener(op dyngo.Operation, events *trace.SecurityEventsHolder, wafCtx *waf.Context, limiter limiter.Limiter) {
- dyngo.On(op, sharedsec.MakeWAFRunListener(events, wafCtx, limiter, func(args types.SQLOperationArgs) waf.RunAddressData {
- return waf.RunAddressData{Ephemeral: map[string]any{ServerDBStatementAddr: args.Query, ServerDBTypeAddr: args.Driver}}
- }))
+func (*Feature) String() string {
+ return "SQLi Protection"
}
-func SQLAddressesPresent(addresses listener.AddressSet) bool {
- _, queryAddr := addresses[ServerDBStatementAddr]
- _, driverAddr := addresses[ServerDBTypeAddr]
+func (*Feature) Stop() {}
- return queryAddr || driverAddr
+func NewSQLSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !cfg.RASP || !cfg.SupportedAddresses.AnyOf(addresses.ServerDBTypeAddr, addresses.ServerDBStatementAddr) {
+ return nil, nil
+ }
+
+ feature := &Feature{}
+ dyngo.On(rootOp, feature.OnStart)
+ return feature, nil
+}
+func (*Feature) OnStart(op *sqlsec.SQLOperation, args sqlsec.SQLOperationArgs) {
+ dyngo.EmitData(op, waf.RunEvent{
+ Operation: op,
+ RunAddressData: addresses.NewAddressesBuilder().
+ WithDBStatement(args.Query).
+ WithDBType(args.Driver).
+ Build(),
+ })
}
diff --git a/internal/appsec/listener/trace/trace.go b/internal/appsec/listener/trace/trace.go
new file mode 100644
index 0000000000..45fb28e99f
--- /dev/null
+++ b/internal/appsec/listener/trace/trace.go
@@ -0,0 +1,53 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package trace
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
+)
+
+// AppSec-specific span tags that are expected to
+// be in the web service entry span (span of type `web`) when AppSec is enabled.
+var staticAppsecTags = map[string]any{
+ "_dd.appsec.enabled": 1,
+ "_dd.runtime_family": "go",
+}
+
+type AppsecSpanTransport struct{}
+
+func (*AppsecSpanTransport) String() string {
+ return "Appsec Span Transport"
+}
+
+func (*AppsecSpanTransport) Stop() {}
+
+func NewAppsecSpanTransport(_ *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ ast := &AppsecSpanTransport{}
+
+ dyngo.On(rootOp, ast.OnServiceEntryStart)
+ dyngo.On(rootOp, ast.OnSpanStart)
+
+ return ast, nil
+}
+
+// OnServiceEntryStart is the start listener of the trace.ServiceEntrySpanOperation start event.
+// It listens for tags and serializable tags and sets them on the span when finishing the operation.
+func (*AppsecSpanTransport) OnServiceEntryStart(op *trace.ServiceEntrySpanOperation, _ trace.ServiceEntrySpanArgs) {
+ op.SetTags(staticAppsecTags)
+ dyngo.OnData(op, op.OnSpanTagEvent)
+ dyngo.OnData(op, op.OnServiceEntrySpanTagEvent)
+ dyngo.OnData(op, op.OnJSONServiceEntrySpanTagEvent)
+ dyngo.OnData(op, op.OnServiceEntrySpanTagsBulkEvent)
+}
+
+// OnSpanStart is the start listener of the trace.SpanOperation start event.
+// It listens for tags and sets them on the current span when finishing the operation.
+func (*AppsecSpanTransport) OnSpanStart(op *trace.SpanOperation, _ trace.SpanArgs) {
+ dyngo.OnData(op, op.OnSpanTagEvent)
+}
diff --git a/internal/appsec/listener/usersec/usec.go b/internal/appsec/listener/usersec/usec.go
new file mode 100644
index 0000000000..c8a6458019
--- /dev/null
+++ b/internal/appsec/listener/usersec/usec.go
@@ -0,0 +1,54 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package usersec
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/usersec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
+)
+
+type Feature struct{}
+
+func (*Feature) String() string {
+ return "User Security"
+}
+
+func (*Feature) Stop() {}
+
+func NewUserSecFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if !cfg.SupportedAddresses.AnyOf(
+ addresses.UserIDAddr,
+ addresses.UserSessionIDAddr,
+ addresses.UserLoginSuccessAddr,
+ addresses.UserLoginFailureAddr) {
+ return nil, nil
+ }
+
+ feature := &Feature{}
+ dyngo.OnFinish(rootOp, feature.OnFinish)
+ return feature, nil
+}
+
+func (*Feature) OnFinish(op *usersec.UserLoginOperation, res usersec.UserLoginOperationRes) {
+ builder := addresses.NewAddressesBuilder().
+ WithUserID(res.UserID).
+ WithUserSessionID(res.SessionID)
+
+ if res.Success {
+ builder = builder.WithUserLoginSuccess()
+ } else {
+ builder = builder.WithUserLoginFailure()
+ }
+
+ dyngo.EmitData(op, waf.RunEvent{
+ Operation: op,
+ RunAddressData: builder.Build(),
+ })
+}
diff --git a/internal/appsec/listener/waf/tags.go b/internal/appsec/listener/waf/tags.go
new file mode 100644
index 0000000000..bac41ce5ed
--- /dev/null
+++ b/internal/appsec/listener/waf/tags.go
@@ -0,0 +1,101 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package waf
+
+import (
+ "encoding/json"
+ "fmt"
+
+ waf "github.com/DataDog/go-libddwaf/v3"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
+)
+
+const (
+ wafSpanTagPrefix = "_dd.appsec."
+ eventRulesVersionTag = wafSpanTagPrefix + "event_rules.version"
+ eventRulesErrorsTag = wafSpanTagPrefix + "event_rules.errors"
+ eventRulesLoadedTag = wafSpanTagPrefix + "event_rules.loaded"
+ eventRulesFailedTag = wafSpanTagPrefix + "event_rules.error_count"
+ wafVersionTag = wafSpanTagPrefix + "waf.version"
+
+ // BlockedRequestTag used to convey whether a request is blocked
+ BlockedRequestTag = "appsec.blocked"
+)
+
+// AddRulesMonitoringTags adds the tags related to security rules monitoring
+func AddRulesMonitoringTags(th trace.TagSetter, wafDiags waf.Diagnostics) {
+ rInfo := wafDiags.Rules
+ if rInfo == nil {
+ return
+ }
+
+ var rulesetErrors []byte
+ var err error
+ rulesetErrors, err = json.Marshal(wafDiags.Rules.Errors)
+ if err != nil {
+ log.Error("appsec: could not marshal the waf ruleset info errors to json")
+ }
+ th.SetTag(eventRulesErrorsTag, string(rulesetErrors))
+ th.SetTag(eventRulesLoadedTag, len(rInfo.Loaded))
+ th.SetTag(eventRulesFailedTag, len(rInfo.Failed))
+ th.SetTag(wafVersionTag, waf.Version())
+ th.SetTag(ext.ManualKeep, samplernames.AppSec)
+}
+
+// AddWAFMonitoringTags adds the tags related to the monitoring of the WAF
+func AddWAFMonitoringTags(th trace.TagSetter, rulesVersion string, stats map[string]any) {
+ // Rules version is set for every request to help the backend associate WAF duration metrics with rule version
+ th.SetTag(eventRulesVersionTag, rulesVersion)
+
+ // Report the stats sent by the WAF
+ for k, v := range stats {
+ th.SetTag(wafSpanTagPrefix+k, v)
+ }
+}
+
+// SetEventSpanTags sets the security event span tags into the service entry span.
+func SetEventSpanTags(span trace.TagSetter, events []any) error {
+ if len(events) == 0 {
+ return nil
+ }
+
+ // Set the appsec event span tag
+ val, err := makeEventTagValue(events)
+ if err != nil {
+ return err
+ }
+ span.SetTag("_dd.appsec.json", string(val))
+ // Keep this span due to the security event
+ //
+ // This is a workaround to tell the tracer that the trace was kept by AppSec.
+ // Passing any other value than `appsec.SamplerAppSec` has no effect.
+ // Customers should use `span.SetTag(ext.ManualKeep, true)` pattern
+ // to keep the trace, manually.
+ span.SetTag(ext.ManualKeep, samplernames.AppSec)
+ span.SetTag("_dd.origin", "appsec")
+ // Set the appsec.event tag needed by the appsec backend
+ span.SetTag("appsec.event", true)
+ return nil
+}
+
+// Create the value of the security event tag.
+func makeEventTagValue(events []any) (json.RawMessage, error) {
+ type eventTagValue struct {
+ Triggers []any `json:"triggers"`
+ }
+
+ tag, err := json.Marshal(eventTagValue{events})
+ if err != nil {
+ return nil, fmt.Errorf("unexpected error while serializing the appsec event span tag: %v", err)
+ }
+
+ return tag, nil
+}
diff --git a/internal/appsec/listener/sharedsec/shared_test.go b/internal/appsec/listener/waf/tags_test.go
similarity index 73%
rename from internal/appsec/listener/sharedsec/shared_test.go
rename to internal/appsec/listener/waf/tags_test.go
index e53ea7a1e4..d69a8fcdd7 100644
--- a/internal/appsec/listener/sharedsec/shared_test.go
+++ b/internal/appsec/listener/waf/tags_test.go
@@ -1,16 +1,17 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
+// Copyright 2024 Datadog, Inc.
-package sharedsec
+package waf
import (
"testing"
waf "github.com/DataDog/go-libddwaf/v3"
"github.com/stretchr/testify/require"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/trace"
)
const (
@@ -21,7 +22,7 @@ const (
// Test that internal functions used to set span tags use the correct types
func TestTagsTypes(t *testing.T) {
- th := trace.NewTagsHolder()
+ th := make(trace.TestTagSetter)
wafDiags := waf.Diagnostics{
Version: "1.3.0",
Rules: &waf.DiagnosticEntry{
@@ -31,14 +32,16 @@ func TestTagsTypes(t *testing.T) {
},
}
- AddRulesMonitoringTags(&th, &wafDiags)
+ AddRulesMonitoringTags(&th, wafDiags)
stats := map[string]any{
- wafDurationTag: 10,
- wafDurationExtTag: 20,
- wafTimeoutTag: 0,
- "_dd.appsec.waf.truncations.depth": []int{1, 2, 3},
- "_dd.appsec.waf.run": 12000,
+ "waf.duration": 10,
+ "rasp.duration": 10,
+ "waf.duration_ext": 20,
+ "rasp.duration_ext": 20,
+ "waf.timeouts": 0,
+ "waf.truncations.depth": []int{1, 2, 3},
+ "waf.run": 12000,
}
AddWAFMonitoringTags(&th, "1.2.3", stats)
diff --git a/internal/appsec/listener/waf/waf.go b/internal/appsec/listener/waf/waf.go
new file mode 100644
index 0000000000..308eaa25d7
--- /dev/null
+++ b/internal/appsec/listener/waf/waf.go
@@ -0,0 +1,128 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package waf
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/DataDog/appsec-internal-go/limiter"
+ wafv3 "github.com/DataDog/go-libddwaf/v3"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/appsec/events"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/actions"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/stacktrace"
+)
+
+type Feature struct {
+ timeout time.Duration
+ limiter *limiter.TokenTicker
+ handle *wafv3.Handle
+ supportedAddrs config.AddressSet
+ reportRulesTags sync.Once
+}
+
+func NewWAFFeature(cfg *config.Config, rootOp dyngo.Operation) (listener.Feature, error) {
+ if ok, err := wafv3.Load(); err != nil {
+ // 1. If there is an error and the loading is not ok: log as an unexpected error case and quit appsec
+ // Note that we assume here that the test for the unsupported target has been done before calling
+ // this method, so it is now considered an error for this method
+ if !ok {
+ return nil, fmt.Errorf("error while loading libddwaf: %w", err)
+ }
+ // 2. If there is an error and the loading is ok: log as an informative error; appsec can still be used
+ log.Error("appsec: non-critical error while loading libddwaf: %v", err)
+ }
+
+ newHandle, err := wafv3.NewHandle(cfg.RulesManager.Latest, cfg.Obfuscator.KeyRegex, cfg.Obfuscator.ValueRegex)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg.SupportedAddresses = config.NewAddressSet(newHandle.Addresses())
+
+ tokenTicker := limiter.NewTokenTicker(cfg.TraceRateLimit, cfg.TraceRateLimit)
+ tokenTicker.Start()
+
+ feature := &Feature{
+ handle: newHandle,
+ timeout: cfg.WAFTimeout,
+ limiter: tokenTicker,
+ supportedAddrs: cfg.SupportedAddresses,
+ }
+
+ dyngo.On(rootOp, feature.onStart)
+ dyngo.OnFinish(rootOp, feature.onFinish)
+
+ return feature, nil
+}
+
+func (waf *Feature) onStart(op *waf.ContextOperation, _ waf.ContextArgs) {
+ waf.reportRulesTags.Do(func() {
+ AddRulesMonitoringTags(op, waf.handle.Diagnostics())
+ })
+
+ ctx, err := waf.handle.NewContextWithBudget(waf.timeout)
+ if err != nil {
+ log.Debug("appsec: failed to create Feature context: %v", err)
+ }
+
+ op.SwapContext(ctx)
+ op.SetLimiter(waf.limiter)
+ op.SetSupportedAddresses(waf.supportedAddrs)
+
+ // Run the WAF with the given address data
+ dyngo.OnData(op, op.OnEvent)
+
+ waf.SetupActionHandlers(op)
+}
+
+func (waf *Feature) SetupActionHandlers(op *waf.ContextOperation) {
+ // Set the blocking tag on the operation when a blocking event is received
+ dyngo.OnData(op, func(_ *events.BlockingSecurityEvent) {
+ op.SetTag(BlockedRequestTag, true)
+ })
+
+ // Register the stacktrace if one is requested by a WAF action
+ dyngo.OnData(op, func(err *actions.StackTraceAction) {
+ op.AddStackTraces(err.Event)
+ })
+}
+
+func (waf *Feature) onFinish(op *waf.ContextOperation, _ waf.ContextRes) {
+ ctx := op.SwapContext(nil)
+ if ctx == nil {
+ return
+ }
+
+ ctx.Close()
+
+ AddWAFMonitoringTags(op, waf.handle.Diagnostics().Version, ctx.Stats().Metrics())
+ if err := SetEventSpanTags(op, op.Events()); err != nil {
+ log.Debug("appsec: failed to set event span tags: %v", err)
+ }
+
+ op.SetSerializableTags(op.Derivatives())
+ if stacks := op.StackTraces(); len(stacks) > 0 {
+ op.SetTag(stacktrace.SpanKey, stacktrace.GetSpanValue(stacks...))
+ }
+}
+
+func (*Feature) String() string {
+ return "Web Application Firewall"
+}
+
+func (waf *Feature) Stop() {
+ waf.limiter.Stop()
+ waf.handle.Close()
+}
diff --git a/internal/appsec/remoteconfig.go b/internal/appsec/remoteconfig.go
index 9dddf65a8d..ccf6fe7631 100644
--- a/internal/appsec/remoteconfig.go
+++ b/internal/appsec/remoteconfig.go
@@ -9,10 +9,12 @@ import (
"encoding/json"
"errors"
"fmt"
+ "maps"
"os"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion"
"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
internal "github.com/DataDog/appsec-internal-go/appsec"
@@ -41,20 +43,13 @@ func statusesFromUpdate(u remoteconfig.ProductUpdate, ack bool, err error) map[s
return statuses
}
-func mergeMaps[K comparable, V any](m1 map[K]V, m2 map[K]V) map[K]V {
- for key, value := range m2 {
- m1[key] = value
- }
- return m1
-}
-
// combineRCRulesUpdates updates the state of the given RulesManager with the combination of all the provided rules updates
func combineRCRulesUpdates(r *config.RulesManager, updates map[string]remoteconfig.ProductUpdate) (statuses map[string]rc.ApplyStatus, err error) {
// Spare some re-allocations (but there may still be some because 1 update may contain N configs)
statuses = make(map[string]rc.ApplyStatus, len(updates))
// Set the default statuses for all updates to unacknowledged
for _, u := range updates {
- statuses = mergeMaps(statuses, statusesFromUpdate(u, false, nil))
+ maps.Copy(statuses, statusesFromUpdate(u, false, nil))
}
updateLoop:
@@ -66,9 +61,9 @@ updateLoop:
switch p {
case rc.ProductASMData:
// Merge all rules data entries together and store them as a RulesManager edit entry
- rulesData, status := mergeRulesData(u)
- statuses = mergeMaps(statuses, status)
- r.AddEdit("asmdata", config.RulesFragment{RulesData: rulesData})
+ fragment, status := mergeASMDataUpdates(u)
+ maps.Copy(statuses, status)
+ r.AddEdit("asmdata", fragment)
case rc.ProductASMDD:
var (
removalFound = false
@@ -83,7 +78,7 @@ updateLoop:
}
// Already seen a removal or an update, return an error
if err != nil {
- statuses = mergeMaps(statuses, statusesFromUpdate(u, true, err))
+ maps.Copy(statuses, statusesFromUpdate(u, true, err))
break updateLoop
}
@@ -103,7 +98,7 @@ updateLoop:
if removalFound {
log.Debug("appsec: Remote config: ASM_DD config removed. Switching back to default rules")
r.ChangeBase(config.DefaultRulesFragment(), "")
- statuses = mergeMaps(statuses, statusesFromUpdate(u, true, nil))
+ maps.Copy(statuses, statusesFromUpdate(u, true, nil))
}
continue
}
@@ -145,7 +140,7 @@ updateLoop:
// Set all statuses to ack if no error occured
if err == nil {
for _, u := range updates {
- statuses = mergeMaps(statuses, statusesFromUpdate(u, true, nil))
+ maps.Copy(statuses, statusesFromUpdate(u, true, nil))
}
}
@@ -182,17 +177,18 @@ func (a *appsec) onRCRulesUpdate(updates map[string]remoteconfig.ProductUpdate)
r.Compile()
log.Debug("appsec: Remote config: final compiled rules: %s", r.String())
+ // Replace the RulesManager with the new one holding the new state
+ a.cfg.RulesManager = &r
+
// If an error occurs while updating the WAF handle, don't swap the RulesManager and propagate the error
// to all config statuses since we can't know which config is the faulty one
- if err = a.swapWAF(r.Latest); err != nil {
+ if err = a.SwapRootOperation(); err != nil {
log.Error("appsec: Remote config: could not apply the new security rules: %v", err)
for k := range statuses {
statuses[k] = genApplyStatus(true, err)
}
return statuses
}
- // Replace the RulesManager with the new one holding the new state
- a.cfg.RulesManager = &r
return statuses
}
@@ -240,12 +236,41 @@ func (a *appsec) handleASMFeatures(u remoteconfig.ProductUpdate) map[string]rc.A
return statuses
}
-func mergeRulesData(u remoteconfig.ProductUpdate) ([]config.RuleDataEntry, map[string]rc.ApplyStatus) {
+func mergeASMDataUpdates(u remoteconfig.ProductUpdate) (config.RulesFragment, map[string]rc.ApplyStatus) {
// Following the RFC, merging should only happen when two rules data with the same ID and same Type are received
- // allRulesData[ID][Type] will return the rules data of said id and type, if it exists
- allRulesData := make(map[string]map[string]config.RuleDataEntry)
+ type mapKey struct {
+ id string
+ typ string
+ }
+ mergedRulesData := make(map[mapKey]config.DataEntry)
+ mergedExclusionData := make(map[mapKey]config.DataEntry)
statuses := statusesFromUpdate(u, true, nil)
+ mergeUpdateEntry := func(mergeMap map[mapKey]config.DataEntry, data []config.DataEntry) {
+ for _, ruleData := range data {
+ key := mapKey{id: ruleData.ID, typ: ruleData.Type}
+ if data, ok := mergeMap[key]; ok {
+ // Merge rules data entries with the same ID and Type
+ mergeMap[key] = config.DataEntry{
+ ID: data.ID,
+ Type: data.Type,
+ Data: mergeRulesDataEntries(data.Data, ruleData.Data),
+ }
+ continue
+ }
+
+ mergeMap[key] = ruleData
+ }
+ }
+
+ mapValues := func(m map[mapKey]config.DataEntry) []config.DataEntry {
+ values := make([]config.DataEntry, 0, len(m))
+ for _, v := range m {
+ values = append(values, v)
+ }
+ return values
+ }
+
for path, raw := range u {
log.Debug("appsec: Remote config: processing %s", path)
@@ -257,36 +282,30 @@ func mergeRulesData(u remoteconfig.ProductUpdate) ([]config.RuleDataEntry, map[s
continue
}
- var rulesData config.RulesData
- if err := json.Unmarshal(raw, &rulesData); err != nil {
+ var asmdataUpdate struct {
+ RulesData []config.DataEntry `json:"rules_data,omitempty"`
+ ExclusionData []config.DataEntry `json:"exclusion_data,omitempty"`
+ }
+ if err := json.Unmarshal(raw, &asmdataUpdate); err != nil {
log.Debug("appsec: Remote config: error while unmarshalling payload for %s: %v. Configuration won't be applied.", path, err)
statuses[path] = genApplyStatus(false, err)
continue
}
- // Check each entry against allRulesData to see if merging is necessary
- for _, ruleData := range rulesData.RulesData {
- if allRulesData[ruleData.ID] == nil {
- allRulesData[ruleData.ID] = make(map[string]config.RuleDataEntry)
- }
- if data, ok := allRulesData[ruleData.ID][ruleData.Type]; ok {
- // Merge rules data entries with the same ID and Type
- data.Data = mergeRulesDataEntries(data.Data, ruleData.Data)
- allRulesData[ruleData.ID][ruleData.Type] = data
- } else {
- allRulesData[ruleData.ID][ruleData.Type] = ruleData
- }
- }
+ mergeUpdateEntry(mergedExclusionData, asmdataUpdate.ExclusionData)
+ mergeUpdateEntry(mergedRulesData, asmdataUpdate.RulesData)
}
- // Aggregate all the rules data before passing it over to the WAF
- var rulesData []config.RuleDataEntry
- for _, m := range allRulesData {
- for _, data := range m {
- rulesData = append(rulesData, data)
- }
+ var fragment config.RulesFragment
+ if len(mergedRulesData) > 0 {
+ fragment.RulesData = mapValues(mergedRulesData)
+ }
+
+ if len(mergedExclusionData) > 0 {
+ fragment.ExclusionData = mapValues(mergedExclusionData)
}
- return rulesData, statuses
+
+ return fragment, statuses
}
// mergeRulesDataEntries merges two slices of rules data entries together, removing duplicates and
@@ -372,6 +391,11 @@ var blockingCapabilities = [...]remoteconfig.Capability{
remoteconfig.ASMCustomRules,
remoteconfig.ASMCustomBlockingResponse,
remoteconfig.ASMTrustedIPs,
+ remoteconfig.ASMExclusionData,
+ remoteconfig.ASMEndpointFingerprinting,
+ remoteconfig.ASMSessionFingerprinting,
+ remoteconfig.ASMNetworkFingerprinting,
+ remoteconfig.ASMHeaderFingerprinting,
}
func (a *appsec) enableRCBlocking() {
@@ -409,7 +433,14 @@ func (a *appsec) enableRASP() {
if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSSRF); err != nil {
log.Debug("appsec: Remote config: couldn't register RASP SSRF: %v", err)
}
- // TODO: register other RASP capabilities when supported
+ if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPSQLI); err != nil {
+ log.Debug("appsec: Remote config: couldn't register RASP SQLI: %v", err)
+ }
+ if orchestrion.Enabled() {
+ if err := remoteconfig.RegisterCapability(remoteconfig.ASMRASPLFI); err != nil {
+ log.Debug("appsec: Remote config: couldn't register RASP LFI: %v", err)
+ }
+ }
}
func (a *appsec) disableRCBlocking() {
diff --git a/internal/appsec/remoteconfig_test.go b/internal/appsec/remoteconfig_test.go
index 34ebb224da..908cea0793 100644
--- a/internal/appsec/remoteconfig_test.go
+++ b/internal/appsec/remoteconfig_test.go
@@ -10,13 +10,12 @@ import (
"errors"
"os"
"reflect"
- "sort"
+ "slices"
"strings"
"testing"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/sharedsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
"gopkg.in/DataDog/dd-trace-go.v1/internal/remoteconfig"
internal "github.com/DataDog/appsec-internal-go/appsec"
@@ -119,63 +118,63 @@ func TestMergeRulesData(t *testing.T) {
for _, tc := range []struct {
name string
update remoteconfig.ProductUpdate
- expected []config.RuleDataEntry
+ expected config.RulesFragment
statuses map[string]rc.ApplyStatus
}{
{
name: "empty-rule-data",
update: map[string][]byte{},
- statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
+ statuses: map[string]rc.ApplyStatus{},
},
{
name: "bad-json",
update: map[string][]byte{
"some/path": []byte(`[}]`),
},
- statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateError}},
+ statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateError, Error: "invalid character '}' looking for beginning of value"}},
},
{
- name: "single-value",
+ name: "single-rules-value",
update: map[string][]byte{
"some/path": []byte(`{"rules_data":[{"id":"test","type":"data_with_expiration","data":[{"expiration":3494138481,"value":"user1"}]}]}`),
},
- expected: []config.RuleDataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ expected: config.RulesFragment{RulesData: []config.DataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
{Expiration: 3494138481, Value: "user1"},
- }}},
+ }}}},
statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
},
{
- name: "multiple-values",
+ name: "multiple-rules-values",
update: map[string][]byte{
"some/path": []byte(`{"rules_data":[{"id":"test","type":"data_with_expiration","data":[{"expiration":3494138481,"value":"user1"},{"expiration":3494138441,"value":"user2"}]}]}`),
},
- expected: []config.RuleDataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ expected: config.RulesFragment{RulesData: []config.DataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
{Expiration: 3494138481, Value: "user1"},
{Expiration: 3494138441, Value: "user2"},
- }}},
+ }}}},
statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
},
{
- name: "multiple-entries",
+ name: "multiple-rules-entries",
update: map[string][]byte{
"some/path": []byte(`{"rules_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138444,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":3495138481,"value":"user4"}]}]}`),
},
- expected: []config.RuleDataEntry{
+ expected: config.RulesFragment{RulesData: []config.DataEntry{
{ID: "test1", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
{Expiration: 3494138444, Value: "user3"},
}}, {ID: "test2", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
{Expiration: 3495138481, Value: "user4"},
}},
- },
+ }},
statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
},
{
- name: "merging-entries",
+ name: "merging-rules-entries",
update: map[string][]byte{
"some/path/1": []byte(`{"rules_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138444,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":3495138481,"value":"user4"}]}]}`),
"some/path/2": []byte(`{"rules_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138445,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":0,"value":"user5"}]}]}`),
},
- expected: []config.RuleDataEntry{
+ expected: config.RulesFragment{RulesData: []config.DataEntry{
{ID: "test1", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
{Expiration: 3494138445, Value: "user3"},
}},
@@ -183,7 +182,62 @@ func TestMergeRulesData(t *testing.T) {
{Expiration: 3495138481, Value: "user4"},
{Expiration: 0, Value: "user5"},
}},
+ }},
+ statuses: map[string]rc.ApplyStatus{
+ "some/path/1": {State: rc.ApplyStateAcknowledged},
+ "some/path/2": {State: rc.ApplyStateAcknowledged},
+ },
+ },
+ {
+ name: "single-exclusions-value",
+ update: map[string][]byte{
+ "some/path": []byte(`{"exclusion_data":[{"id":"test","type":"data_with_expiration","data":[{"expiration":3494138481,"value":"user1"}]}]}`),
+ },
+ expected: config.RulesFragment{ExclusionData: []config.DataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3494138481, Value: "user1"},
+ }}}},
+ statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
+ },
+ {
+ name: "multiple-exclusions-values",
+ update: map[string][]byte{
+ "some/path": []byte(`{"exclusion_data":[{"id":"test","type":"data_with_expiration","data":[{"expiration":3494138481,"value":"user1"},{"expiration":3494138441,"value":"user2"}]}]}`),
},
+ expected: config.RulesFragment{ExclusionData: []config.DataEntry{{ID: "test", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3494138481, Value: "user1"},
+ {Expiration: 3494138441, Value: "user2"},
+ }}}},
+ statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
+ },
+ {
+ name: "multiple-exclusions-entries",
+ update: map[string][]byte{
+ "some/path": []byte(`{"exclusion_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138444,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":3495138481,"value":"user4"}]}]}`),
+ },
+ expected: config.RulesFragment{ExclusionData: []config.DataEntry{
+ {ID: "test1", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3494138444, Value: "user3"},
+ }}, {ID: "test2", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3495138481, Value: "user4"},
+ }},
+ }},
+ statuses: map[string]rc.ApplyStatus{"some/path": {State: rc.ApplyStateAcknowledged}},
+ },
+ {
+ name: "merging-exclusions-entries",
+ update: map[string][]byte{
+ "some/path/1": []byte(`{"exclusion_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138444,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":3495138481,"value":"user4"}]}]}`),
+ "some/path/2": []byte(`{"exclusion_data":[{"id":"test1","type":"data_with_expiration","data":[{"expiration":3494138445,"value":"user3"}]},{"id":"test2","type":"data_with_expiration","data":[{"expiration":0,"value":"user5"}]}]}`),
+ },
+ expected: config.RulesFragment{ExclusionData: []config.DataEntry{
+ {ID: "test1", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3494138445, Value: "user3"},
+ }},
+ {ID: "test2", Type: "data_with_expiration", Data: []rc.ASMDataRuleDataEntry{
+ {Expiration: 3495138481, Value: "user4"},
+ {Expiration: 0, Value: "user5"},
+ }},
+ }},
statuses: map[string]rc.ApplyStatus{
"some/path/1": {State: rc.ApplyStateAcknowledged},
"some/path/2": {State: rc.ApplyStateAcknowledged},
@@ -191,30 +245,27 @@ func TestMergeRulesData(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- merged, statuses := mergeRulesData(tc.update)
- // Sort the compared elements since ordering is not guaranteed and the slice hold types that embed
- // more slices
- require.Len(t, merged, len(tc.expected))
- sort.Slice(merged, func(i, j int) bool {
- return strings.Compare(merged[i].ID, merged[j].ID) < 0
- })
- sort.Slice(tc.expected, func(i, j int) bool {
- return strings.Compare(merged[i].ID, merged[j].ID) < 0
- })
-
- for i := range tc.expected {
- require.Equal(t, tc.expected[i].ID, merged[i].ID)
- require.Equal(t, tc.expected[i].Type, merged[i].Type)
- require.ElementsMatch(t, tc.expected[i].Data, merged[i].Data)
- }
- for k := range statuses {
- require.Equal(t, tc.statuses[k].State, statuses[k].State)
- if statuses[k].State == rc.ApplyStateError {
- require.NotEmpty(t, statuses[k].Error)
- } else {
- require.Empty(t, statuses[k].Error)
+ fragment, statuses := mergeASMDataUpdates(tc.update)
+
+ // Sort the data entries to make the comparison easier
+ sort := func(actual []config.DataEntry) {
+ slices.SortStableFunc(actual, func(a, b config.DataEntry) int {
+ return strings.Compare(a.ID, b.ID)
+ })
+ for _, data := range actual {
+ slices.SortStableFunc(data.Data, func(a, b rc.ASMDataRuleDataEntry) int {
+ return strings.Compare(a.Value, b.Value)
+ })
}
}
+
+ sort(fragment.RulesData)
+ sort(fragment.ExclusionData)
+ sort(tc.expected.RulesData)
+ sort(tc.expected.ExclusionData)
+
+ require.Equal(t, tc.expected, fragment)
+ require.Equal(t, tc.statuses, statuses)
})
}
}
@@ -437,7 +488,7 @@ func craftRCUpdates(fragments map[string]config.RulesFragment) map[string]remote
update[rc.ProductASM] = make(remoteconfig.ProductUpdate)
}
update[rc.ProductASM][path] = data
- } else if len(frag.RulesData) > 0 {
+ } else if len(frag.RulesData) > 0 || len(frag.ExclusionData) > 0 {
if _, ok := update[rc.ProductASMData]; !ok {
update[rc.ProductASMData] = make(remoteconfig.ProductUpdate)
}
@@ -457,7 +508,7 @@ type testRulesOverrideEntry struct {
func TestOnRCUpdate(t *testing.T) {
- BaseRuleset, err := config.NewRulesManeger(nil)
+ BaseRuleset, err := config.NewRulesManager(nil)
require.NoError(t, err)
BaseRuleset.Compile()
@@ -671,7 +722,7 @@ func TestOnRCUpdate(t *testing.T) {
}
func TestOnRCUpdateStatuses(t *testing.T) {
- invalidRuleset, err := config.NewRulesManeger([]byte(`{"version": "2.2", "metadata": {"rules_version": "1.4.2"}, "rules": [{"id": "id","name":"name","tags":{},"conditions":[],"transformers":[],"on_match":[]}]}`))
+ invalidRuleset, err := config.NewRulesManager([]byte(`{"version": "2.2", "metadata": {"rules_version": "1.4.2"}, "rules": [{"id": "id","name":"name","tags":{},"conditions":[],"transformers":[],"on_match":[]}]}`))
require.NoError(t, err)
invalidRules := invalidRuleset.Base
overrides := config.RulesFragment{
@@ -787,10 +838,12 @@ func TestWafRCUpdate(t *testing.T) {
require.NoError(t, err)
defer wafCtx.Close()
values := map[string]interface{}{
- httpsec.ServerRequestPathParamsAddr: "/rfiinc.txt",
+ addresses.ServerRequestPathParamsAddr: "/rfiinc.txt",
}
+
// Make sure the rule matches as expected
- result := sharedsec.RunWAF(wafCtx, waf.RunAddressData{Persistent: values})
+ result, err := wafCtx.Run(waf.RunAddressData{Persistent: values})
+ require.NoError(t, err)
require.Contains(t, jsonString(t, result.Events), "crs-913-120")
require.Empty(t, result.Actions)
// Simulate an RC update that disables the rule
@@ -807,7 +860,8 @@ func TestWafRCUpdate(t *testing.T) {
require.NoError(t, err)
defer newWafCtx.Close()
// Make sure the rule returns a blocking action when matching
- result = sharedsec.RunWAF(newWafCtx, waf.RunAddressData{Persistent: values})
+ result, err = newWafCtx.Run(waf.RunAddressData{Persistent: values})
+ require.NoError(t, err)
require.Contains(t, jsonString(t, result.Events), "crs-913-120")
require.Contains(t, result.Actions, "block_request")
})
diff --git a/internal/appsec/testdata/fp.json b/internal/appsec/testdata/fp.json
new file mode 100644
index 0000000000..52af47f36c
--- /dev/null
+++ b/internal/appsec/testdata/fp.json
@@ -0,0 +1,207 @@
+{
+ "version": "2.2",
+ "metadata": {
+ "rules_version": "1.4.2"
+ },
+ "rules": [
+ {
+ "id": "crs-933-130-block",
+ "name": "PHP Injection Attack: Global Variables Found",
+ "tags": {
+ "type": "php_code_injection",
+ "crs_id": "933130",
+ "category": "attack_attempt",
+ "confidence": "1"
+ },
+ "conditions": [
+ {
+ "parameters": {
+ "inputs": [
+ {
+ "address": "server.request.query"
+ }
+ ],
+ "list": [
+ "$globals"
+ ]
+ },
+ "operator": "phrase_match"
+ }
+ ],
+ "transformers": [
+ "lowercase"
+ ]
+ }
+ ],
+ "processors": [
+ {
+ "id": "http-endpoint-fingerprint",
+ "generator": "http_endpoint_fingerprint",
+ "conditions": [
+ {
+ "operator": "exists",
+ "parameters": {
+ "inputs": [
+ {
+ "address": "waf.context.event"
+ },
+ {
+ "address": "server.business_logic.users.login.failure"
+ },
+ {
+ "address": "server.business_logic.users.login.success"
+ }
+ ]
+ }
+ }
+ ],
+ "parameters": {
+ "mappings": [
+ {
+ "method": [
+ {
+ "address": "server.request.method"
+ }
+ ],
+ "uri_raw": [
+ {
+ "address": "server.request.uri.raw"
+ }
+ ],
+ "body": [
+ {
+ "address": "server.request.body"
+ }
+ ],
+ "query": [
+ {
+ "address": "server.request.query"
+ }
+ ],
+ "output": "_dd.appsec.fp.http.endpoint"
+ }
+ ]
+ },
+ "evaluate": false,
+ "output": true
+ },
+ {
+ "id": "http-header-fingerprint",
+ "generator": "http_header_fingerprint",
+ "conditions": [
+ {
+ "operator": "exists",
+ "parameters": {
+ "inputs": [
+ {
+ "address": "waf.context.event"
+ },
+ {
+ "address": "server.business_logic.users.login.failure"
+ },
+ {
+ "address": "server.business_logic.users.login.success"
+ }
+ ]
+ }
+ }
+ ],
+ "parameters": {
+ "mappings": [
+ {
+ "headers": [
+ {
+ "address": "server.request.headers.no_cookies"
+ }
+ ],
+ "output": "_dd.appsec.fp.http.header"
+ }
+ ]
+ },
+ "evaluate": false,
+ "output": true
+ },
+ {
+ "id": "http-network-fingerprint",
+ "generator": "http_network_fingerprint",
+ "conditions": [
+ {
+ "operator": "exists",
+ "parameters": {
+ "inputs": [
+ {
+ "address": "waf.context.event"
+ },
+ {
+ "address": "server.business_logic.users.login.failure"
+ },
+ {
+ "address": "server.business_logic.users.login.success"
+ }
+ ]
+ }
+ }
+ ],
+ "parameters": {
+ "mappings": [
+ {
+ "headers": [
+ {
+ "address": "server.request.headers.no_cookies"
+ }
+ ],
+ "output": "_dd.appsec.fp.http.network"
+ }
+ ]
+ },
+ "evaluate": false,
+ "output": true
+ },
+ {
+ "id": "session-fingerprint",
+ "generator": "session_fingerprint",
+ "conditions": [
+ {
+ "operator": "exists",
+ "parameters": {
+ "inputs": [
+ {
+ "address": "waf.context.event"
+ },
+ {
+ "address": "server.business_logic.users.login.failure"
+ },
+ {
+ "address": "server.business_logic.users.login.success"
+ }
+ ]
+ }
+ }
+ ],
+ "parameters": {
+ "mappings": [
+ {
+ "cookies": [
+ {
+ "address": "server.request.cookies"
+ }
+ ],
+ "session_id": [
+ {
+ "address": "usr.session_id"
+ }
+ ],
+ "user_id": [
+ {
+ "address": "usr.id"
+ }
+ ],
+ "output": "_dd.appsec.fp.session"
+ }
+ ]
+ },
+ "evaluate": false,
+ "output": true
+ }
+ ]
+}
diff --git a/internal/appsec/testdata/sab.json b/internal/appsec/testdata/sab.json
new file mode 100644
index 0000000000..8c1ce8e0df
--- /dev/null
+++ b/internal/appsec/testdata/sab.json
@@ -0,0 +1,136 @@
+{
+ "version": "2.2",
+ "metadata": {
+ "rules_version": "1.4.2"
+ },
+ "rules": [
+ {
+ "id": "crs-933-130-block",
+ "name": "PHP Injection Attack: Global Variables Found",
+ "tags": {
+ "type": "php_code_injection",
+ "crs_id": "933130",
+ "category": "attack_attempt",
+ "confidence": "1"
+ },
+ "conditions": [
+ {
+ "parameters": {
+ "inputs": [
+ {
+ "address": "server.request.query"
+ },
+ {
+ "address": "server.request.body"
+ },
+ {
+ "address": "server.request.path_params"
+ },
+ {
+ "address": "grpc.server.request.message"
+ }
+ ],
+ "list": [
+ "$globals",
+ "$_cookie",
+ "$_env",
+ "$_files",
+ "$_get",
+ "$_post",
+ "$_request",
+ "$_server",
+ "$_session",
+ "$argc",
+ "$argv",
+ "$http_\\u200bresponse_\\u200bheader",
+ "$php_\\u200berrormsg",
+ "$http_cookie_vars",
+ "$http_env_vars",
+ "$http_get_vars",
+ "$http_post_files",
+ "$http_post_vars",
+ "$http_raw_post_data",
+ "$http_request_vars",
+ "$http_server_vars"
+ ]
+ },
+ "operator": "phrase_match"
+ }
+ ],
+ "transformers": [
+ "lowercase"
+ ]
+ }
+ ],
+ "actions": [
+ {
+ "id": "block_402",
+ "type": "block_request",
+ "parameters": {
+ "status_code": 402,
+ "type": "auto"
+ }
+ },
+ {
+ "id": "block_401",
+ "type": "block_request",
+ "parameters": {
+ "status_code": 401,
+ "type": "auto"
+ }
+ }
+ ],
+ "exclusions": [
+ {
+ "conditions": [
+ {
+ "operator": "ip_match",
+ "parameters": {
+ "data": "suspicious_ips",
+ "inputs": [
+ {
+ "address": "http.client_ip"
+ }
+ ]
+ }
+ }
+ ],
+ "id": "suspicious_ip_blocking",
+ "on_match": "block_402"
+ },
+ {
+ "conditions": [
+ {
+ "operator": "exact_match",
+ "parameters": {
+ "data": "suspicious_users",
+ "inputs": [
+ {
+ "address": "usr.id"
+ }
+ ]
+ }
+ }
+ ],
+ "transformers": [],
+ "id": "suspicious_user_blocking",
+ "on_match": "block_401"
+ }
+ ],
+ "exclusion_data": [
+ {
+ "id": "suspicious_ips",
+ "type": "ip_with_expiration",
+ "data": [
+ { "value": "1.2.3.4" }
+ ]
+ },
+ {
+ "id": "suspicious_users",
+ "type": "data_with_expiration",
+ "data": [
+ { "value": "blocked-user-1" }
+ ]
+ }
+ ]
+}
diff --git a/internal/appsec/trace/grpctrace/grpc.go b/internal/appsec/trace/grpctrace/grpc.go
deleted file mode 100644
index fff5271971..0000000000
--- a/internal/appsec/trace/grpctrace/grpc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package grpctrace
-
-import (
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
-)
-
-// SetSecurityEventsTags sets the AppSec events span tags.
-func SetSecurityEventsTags(span ddtrace.Span, events []any) {
- if err := setSecurityEventsTags(span, events); err != nil {
- log.Error("appsec: unexpected error while creating the appsec events tags: %v", err)
- }
-}
-
-func setSecurityEventsTags(span ddtrace.Span, events []any) error {
- if events == nil {
- return nil
- }
- return trace.SetEventSpanTags(span, events)
-}
-
-// SetRequestMetadataTags sets the gRPC request metadata span tags.
-func SetRequestMetadataTags(span ddtrace.Span, md map[string][]string) {
- for h, v := range httptrace.NormalizeHTTPHeaders(md) {
- span.SetTag("grpc.metadata."+h, v)
- }
-}
diff --git a/internal/appsec/trace/httptrace/http_test.go b/internal/appsec/trace/httptrace/http_test.go
deleted file mode 100644
index 172c1c8b1d..0000000000
--- a/internal/appsec/trace/httptrace/http_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package httptrace_test
-
-import (
- "net"
- "net/netip"
- "testing"
-
- "github.com/stretchr/testify/require"
- "google.golang.org/grpc/metadata"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace/httptrace"
-)
-
-func TestClientIP(t *testing.T) {
- for _, tc := range []struct {
- name string
- addr net.Addr
- md metadata.MD
- expectedClientIP string
- }{
- {
- name: "tcp-ipv4-address",
- addr: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
- expectedClientIP: "1.2.3.4",
- },
- {
- name: "tcp-ipv4-address",
- addr: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
- md: map[string][]string{"x-client-ip": {"127.0.0.1, 2.3.4.5"}},
- expectedClientIP: "2.3.4.5",
- },
- {
- name: "tcp-ipv6-address",
- addr: &net.TCPAddr{IP: net.ParseIP("::1"), Port: 6789},
- expectedClientIP: "::1",
- },
- {
- name: "udp-ipv4-address",
- addr: &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 6789},
- expectedClientIP: "1.2.3.4",
- },
- {
- name: "udp-ipv6-address",
- addr: &net.UDPAddr{IP: net.ParseIP("::1"), Port: 6789},
- expectedClientIP: "::1",
- },
- {
- name: "unix-socket-address",
- addr: &net.UnixAddr{Name: "/var/my.sock"},
- },
- } {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- _, clientIP := httptrace.ClientIPTags(tc.md, false, tc.addr.String())
- expectedClientIP, _ := netip.ParseAddr(tc.expectedClientIP)
- require.Equal(t, expectedClientIP.String(), clientIP.String())
- })
- }
-}
-
-func TestNormalizeHTTPHeaders(t *testing.T) {
- for _, tc := range []struct {
- headers map[string][]string
- expected map[string]string
- }{
- {
- headers: nil,
- expected: nil,
- },
- {
- headers: map[string][]string{
- "cookie": {"not-collected"},
- },
- expected: nil,
- },
- {
- headers: map[string][]string{
- "cookie": {"not-collected"},
- "x-forwarded-for": {"1.2.3.4,5.6.7.8"},
- },
- expected: map[string]string{
- "x-forwarded-for": "1.2.3.4,5.6.7.8",
- },
- },
- {
- headers: map[string][]string{
- "cookie": {"not-collected"},
- "x-forwarded-for": {"1.2.3.4,5.6.7.8", "9.10.11.12,13.14.15.16"},
- },
- expected: map[string]string{
- "x-forwarded-for": "1.2.3.4,5.6.7.8,9.10.11.12,13.14.15.16",
- },
- },
- } {
- headers := httptrace.NormalizeHTTPHeaders(tc.headers)
- require.Equal(t, tc.expected, headers)
- }
-}
diff --git a/internal/appsec/trace/securityholder.go b/internal/appsec/trace/securityholder.go
deleted file mode 100644
index d61ae8f494..0000000000
--- a/internal/appsec/trace/securityholder.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package trace
-
-import (
- "sync"
-)
-
-// SecurityEventsHolder is a wrapper around a thread safe security events slice.
-// The purpose of this struct is to be used by composition in an Operation to
-// allow said operation to handle security events addition/retrieval.
-type SecurityEventsHolder struct {
- events []any
- mu sync.RWMutex
-}
-
-// AddSecurityEvents adds the security events to the collected events list.
-// Thread safe.
-func (s *SecurityEventsHolder) AddSecurityEvents(events []any) {
- if len(events) == 0 {
- return
- }
-
- s.mu.Lock()
- defer s.mu.Unlock()
- s.events = append(s.events, events...)
-}
-
-// Events returns the list of stored events.
-func (s *SecurityEventsHolder) Events() []any {
- s.mu.RLock()
- defer s.mu.RUnlock()
- // Return a copy, since the lock is released upon return.
- clone := make([]any, len(s.events))
- for i, e := range s.events {
- clone[i] = e
- }
- return clone
-}
-
-// ClearEvents clears the list of stored events
-func (s *SecurityEventsHolder) ClearEvents() {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.events = s.events[0:0]
-}
diff --git a/internal/appsec/trace/tagsholder.go b/internal/appsec/trace/tagsholder.go
deleted file mode 100644
index 65ead108ec..0000000000
--- a/internal/appsec/trace/tagsholder.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package trace
-
-import (
- "encoding/json"
- "sync"
-
- "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
-)
-
-type serializableTag struct {
- tag any
-}
-
-func (t serializableTag) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.tag)
-}
-
-// TagsHolder wraps a map holding tags. The purpose of this struct is to be
-// used by composition in an Operation to allow said operation to handle tags
-// addition/retrieval.
-type TagsHolder struct {
- tags map[string]any
- mu sync.RWMutex
-}
-
-// NewTagsHolder returns a new instance of a TagsHolder struct.
-func NewTagsHolder() TagsHolder {
- return TagsHolder{tags: make(map[string]any)}
-}
-
-// SetTag adds the key/value pair to the tags map
-func (m *TagsHolder) SetTag(k string, v any) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.tags[k] = v
-}
-
-// AddSerializableTag adds the key/value pair to the tags map. Value is serialized as JSON.
-func (m *TagsHolder) AddSerializableTag(k string, v any) {
- m.mu.Lock()
- defer m.mu.Unlock()
- m.tags[k] = serializableTag{tag: v}
-}
-
-// Tags returns a copy of the aggregated tags map (normal and serialized)
-func (m *TagsHolder) Tags() map[string]any {
- tags := make(map[string]any, len(m.tags))
- m.mu.RLock()
- defer m.mu.RUnlock()
- for k, v := range m.tags {
- tags[k] = v
- marshaler, ok := v.(serializableTag)
- if !ok {
- continue
- }
- if marshaled, err := marshaler.MarshalJSON(); err == nil {
- tags[k] = string(marshaled)
- } else {
- log.Debug("appsec: could not marshal serializable tag %s: %v", k, err)
- }
- }
- return tags
-}
-
-var _ TagSetter = (*TagsHolder)(nil) // *TagsHolder must implement TagSetter
diff --git a/internal/appsec/trace/trace.go b/internal/appsec/trace/trace.go
deleted file mode 100644
index 0700cc8581..0000000000
--- a/internal/appsec/trace/trace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-// Package trace provides functions to annotate trace spans with AppSec related
-// information.
-package trace
-
-import (
- "encoding/json"
- "fmt"
-
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/samplernames"
-)
-
-// BlockedRequestTag used to convey whether a request is blocked
-const BlockedRequestTag = "appsec.blocked"
-
-// TagSetter is the interface needed to set a span tag.
-type TagSetter interface {
- SetTag(string, any)
-}
-
-// NoopTagSetter is a TagSetter that does nothing. Useful when no tracer
-// Span is available, but a TagSetter is assumed.
-type NoopTagSetter struct{}
-
-func (NoopTagSetter) SetTag(string, any) {
- // Do nothing
-}
-
-// SetAppSecEnabledTags sets the AppSec-specific span tags that are expected to
-// be in the web service entry span (span of type `web`) when AppSec is enabled.
-func SetAppSecEnabledTags(span TagSetter) {
- span.SetTag("_dd.appsec.enabled", 1)
- span.SetTag("_dd.runtime_family", "go")
-}
-
-// SetEventSpanTags sets the security event span tags into the service entry span.
-func SetEventSpanTags(span TagSetter, events []any) error {
- if len(events) == 0 {
- return nil
- }
-
- // Set the appsec event span tag
- val, err := makeEventTagValue(events)
- if err != nil {
- return err
- }
- span.SetTag("_dd.appsec.json", string(val))
- // Keep this span due to the security event
- //
- // This is a workaround to tell the tracer that the trace was kept by AppSec.
- // Passing any other value than `appsec.SamplerAppSec` has no effect.
- // Customers should use `span.SetTag(ext.ManualKeep, true)` pattern
- // to keep the trace, manually.
- span.SetTag(ext.ManualKeep, samplernames.AppSec)
- span.SetTag("_dd.origin", "appsec")
- // Set the appsec.event tag needed by the appsec backend
- span.SetTag("appsec.event", true)
- return nil
-}
-
-// SetTags fills the span tags using the key/value pairs found in `tags`
-func SetTags[V any](span TagSetter, tags map[string]V) {
- for k, v := range tags {
- span.SetTag(k, v)
- }
-}
-
-// Create the value of the security event tag.
-func makeEventTagValue(events []any) (json.RawMessage, error) {
- type eventTagValue struct {
- Triggers []any `json:"triggers"`
- }
-
- tag, err := json.Marshal(eventTagValue{events})
- if err != nil {
- return nil, fmt.Errorf("unexpected error while serializing the appsec event span tag: %v", err)
- }
-
- return tag, nil
-}
diff --git a/internal/appsec/waf.go b/internal/appsec/waf.go
deleted file mode 100644
index c1ec4c4756..0000000000
--- a/internal/appsec/waf.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Unless explicitly stated otherwise all files in this repository are licensed
-// under the Apache License Version 2.0.
-// This product includes software developed at Datadog (https://www.datadoghq.com/).
-// Copyright 2016 Datadog, Inc.
-
-package appsec
-
-import (
- "github.com/DataDog/appsec-internal-go/limiter"
- waf "github.com/DataDog/go-libddwaf/v3"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
-)
-
-func (a *appsec) swapWAF(rules config.RulesFragment) (err error) {
- // Instantiate a new WAF handle and verify its state
- newHandle, err := waf.NewHandle(rules, a.cfg.Obfuscator.KeyRegex, a.cfg.Obfuscator.ValueRegex)
- if err != nil {
- return err
- }
-
- // Close the WAF handle in case of an error in what's following
- defer func() {
- if err != nil {
- newHandle.Close()
- }
- }()
-
- newRoot := dyngo.NewRootOperation()
- for _, fn := range wafEventListeners {
- fn(newHandle, a.cfg, a.limiter, newRoot)
- }
-
- // Hot-swap dyngo's root operation
- dyngo.SwapRootOperation(newRoot)
-
- // Close old handle.
- // Note that concurrent requests are still using it, and it will be released
- // only when no more requests use it.
- // TODO: implement in dyngo ref-counting of the root operation so we can
- // rely on a Finish event listener on the root operation instead?
- // Avoiding saving the current WAF handle would guarantee no one is
- // accessing a.wafHandle while we swap
- oldHandle := a.wafHandle
- a.wafHandle = newHandle
- if oldHandle != nil {
- oldHandle.Close()
- }
-
- return nil
-}
-
-type wafEventListener func(*waf.Handle, *config.Config, limiter.Limiter, dyngo.Operation)
-
-// wafEventListeners is the global list of event listeners registered by contribs at init time. This
-// is thread-safe assuming all writes (via AddWAFEventListener) are performed within `init`
-// functions; so this is written to only during initialization, and is read from concurrently only
-// during runtime when no writes are happening anymore.
-var wafEventListeners []wafEventListener
-
-// AddWAFEventListener adds a new WAF event listener to be registered whenever a new root operation
-// is created. The normal way to use this is to call it from a `func init() {}` so that it is
-// guaranteed to have happened before any listened to event may be emitted.
-func AddWAFEventListener(fn wafEventListener) {
- wafEventListeners = append(wafEventListeners, fn)
-}
diff --git a/internal/appsec/waf_test.go b/internal/appsec/waf_test.go
index bc760ee20c..7e169ffb7a 100644
--- a/internal/appsec/waf_test.go
+++ b/internal/appsec/waf_test.go
@@ -30,14 +30,15 @@ import (
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/config"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/dyngo"
"gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/ossec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
- _ "github.com/glebarez/go-sqlite"
"github.com/stretchr/testify/require"
+ _ "modernc.org/sqlite"
)
func TestCustomRules(t *testing.T) {
@@ -51,7 +52,7 @@ func TestCustomRules(t *testing.T) {
// Start and trace an HTTP server
mux := httptrace.NewServeMux()
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Hello World!\n"))
})
@@ -108,10 +109,10 @@ func TestUserRules(t *testing.T) {
// Start and trace an HTTP server
mux := httptrace.NewServeMux()
- mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/hello", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Hello World!\n"))
})
- mux.HandleFunc("/response-header", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/response-header", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("match-response-header", "match-response-header")
w.WriteHeader(204)
})
@@ -174,7 +175,7 @@ func TestWAF(t *testing.T) {
// Start and trace an HTTP server
mux := httptrace.NewServeMux()
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Hello World!\n"))
})
mux.HandleFunc("/body", func(w http.ResponseWriter, r *http.Request) {
@@ -330,7 +331,7 @@ func TestBlocking(t *testing.T) {
// Start and trace an HTTP server
mux := httptrace.NewServeMux()
- mux.HandleFunc("/ip", func(w http.ResponseWriter, r *http.Request) {
+ mux.HandleFunc("/ip", func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Hello World!\n"))
})
mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
@@ -743,6 +744,124 @@ func TestRASPLFI(t *testing.T) {
}
}
+func TestSuspiciousAttackerBlocking(t *testing.T) {
+ t.Setenv("DD_APPSEC_RULES", "testdata/sab.json")
+ appsec.Start()
+ defer appsec.Stop()
+ if !appsec.Enabled() {
+ t.Skip("AppSec needs to be enabled for this test")
+ }
+
+ const bodyBlockingRule = "crs-933-130-block"
+
+ // Start and trace an HTTP server
+ mux := httptrace.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ if err := pAppsec.SetUser(r.Context(), r.Header.Get("test-usr")); err != nil {
+ return
+ }
+ buf := new(strings.Builder)
+ io.Copy(buf, r.Body)
+ if err := pAppsec.MonitorParsedHTTPBody(r.Context(), buf.String()); err != nil {
+ return
+ }
+ w.Write([]byte("Hello World!\n"))
+ })
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ for _, tc := range []struct {
+ name string
+ headers map[string]string
+ status int
+ ruleMatch string
+ attack string
+ }{
+ {
+ name: "ip/not-suspicious/no-attack",
+ status: 200,
+ },
+ {
+ name: "ip/suspicious/no-attack",
+ headers: map[string]string{"x-forwarded-for": "1.2.3.4"},
+ status: 200,
+ },
+ {
+ name: "ip/not-suspicious/attack",
+ status: 200,
+ attack: "$globals",
+ ruleMatch: bodyBlockingRule,
+ },
+ {
+ name: "ip/suspicious/attack",
+ headers: map[string]string{"x-forwarded-for": "1.2.3.4"},
+ status: 402,
+ attack: "$globals",
+ ruleMatch: bodyBlockingRule,
+ },
+ {
+ name: "user/not-suspicious/no-attack",
+ status: 200,
+ },
+ {
+ name: "user/suspicious/no-attack",
+ headers: map[string]string{"test-usr": "blocked-user-1"},
+ status: 200,
+ },
+ {
+ name: "user/not-suspicious/attack",
+ status: 200,
+ attack: "$globals",
+ ruleMatch: bodyBlockingRule,
+ },
+ {
+ name: "user/suspicious/attack",
+ headers: map[string]string{"test-usr": "blocked-user-1"},
+ status: 401,
+ attack: "$globals",
+ ruleMatch: bodyBlockingRule,
+ },
+ {
+ name: "ip+user/suspicious/no-attack",
+ headers: map[string]string{"x-forwarded-for": "1.2.3.4", "test-usr": "blocked-user-1"},
+ status: 200,
+ },
+ {
+ name: "ip+user/suspicious/attack",
+ headers: map[string]string{"x-forwarded-for": "1.2.3.4", "test-usr": "blocked-user-1"},
+ status: 402,
+ attack: "$globals",
+ ruleMatch: bodyBlockingRule,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ req, err := http.NewRequest("POST", srv.URL, strings.NewReader(tc.attack))
+ require.NoError(t, err)
+ for k, v := range tc.headers {
+ req.Header.Set(k, v)
+ }
+ res, err := srv.Client().Do(req)
+ require.NoError(t, err)
+ defer res.Body.Close()
+ if tc.ruleMatch != "" {
+ spans := mt.FinishedSpans()
+ require.Len(t, spans, 1)
+ require.Contains(t, spans[0].Tag("_dd.appsec.json"), tc.ruleMatch)
+ }
+ require.Equal(t, tc.status, res.StatusCode)
+ b, err := io.ReadAll(res.Body)
+ require.NoError(t, err)
+ if tc.status == 200 {
+ require.Equal(t, "Hello World!\n", string(b))
+ } else {
+ require.NotEqual(t, "Hello World!\n", string(b))
+ }
+ })
+ }
+}
+
// BenchmarkSampleWAFContext benchmarks the creation of a WAF context and running the WAF on a request/response pair
// This is a basic sample of what could happen in a real-world scenario.
func BenchmarkSampleWAFContext(b *testing.B) {
@@ -768,10 +887,10 @@ func BenchmarkSampleWAFContext(b *testing.B) {
_, err = ctx.Run(
waf.RunAddressData{
Persistent: map[string]any{
- httpsec.HTTPClientIPAddr: "1.1.1.1",
- httpsec.ServerRequestMethodAddr: "GET",
- httpsec.ServerRequestRawURIAddr: "/",
- httpsec.ServerRequestHeadersNoCookiesAddr: map[string][]string{
+ addresses.ClientIPAddr: "1.1.1.1",
+ addresses.ServerRequestMethodAddr: "GET",
+ addresses.ServerRequestRawURIAddr: "/",
+ addresses.ServerRequestHeadersNoCookiesAddr: map[string][]string{
"host": {"example.com"},
"content-length": {"0"},
"Accept": {"application/json"},
@@ -779,13 +898,13 @@ func BenchmarkSampleWAFContext(b *testing.B) {
"Accept-Encoding": {"gzip"},
"Connection": {"close"},
},
- httpsec.ServerRequestCookiesAddr: map[string][]string{
+ addresses.ServerRequestCookiesAddr: map[string][]string{
"cookie": {"session=1234"},
},
- httpsec.ServerRequestQueryAddr: map[string][]string{
+ addresses.ServerRequestQueryAddr: map[string][]string{
"query": {"value"},
},
- httpsec.ServerRequestPathParamsAddr: map[string]string{
+ addresses.ServerRequestPathParamsAddr: map[string]string{
"param": "value",
},
},
@@ -799,12 +918,12 @@ func BenchmarkSampleWAFContext(b *testing.B) {
_, err = ctx.Run(
waf.RunAddressData{
Persistent: map[string]any{
- httpsec.ServerResponseHeadersNoCookiesAddr: map[string][]string{
+ addresses.ServerResponseHeadersNoCookiesAddr: map[string][]string{
"content-type": {"application/json"},
"content-length": {"0"},
"Connection": {"close"},
},
- httpsec.ServerResponseStatusAddr: 200,
+ addresses.ServerResponseStatusAddr: 200,
},
})
@@ -816,6 +935,54 @@ func BenchmarkSampleWAFContext(b *testing.B) {
}
}
+func TestAttackerFingerprinting(t *testing.T) {
+ t.Setenv("DD_APPSEC_RULES", "testdata/fp.json")
+ appsec.Start()
+ defer appsec.Stop()
+ if !appsec.Enabled() {
+ t.Skip("AppSec needs to be enabled for this test")
+ }
+
+ // Start and trace an HTTP server
+ mux := httptrace.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ pAppsec.TrackUserLoginSuccessEvent(
+ r.Context(),
+ "toto",
+ map[string]string{},
+ tracer.WithUserSessionID("sessionID"))
+
+ pAppsec.MonitorParsedHTTPBody(r.Context(), map[string]string{"key": "value"})
+
+ w.Write([]byte("Hello World!\n"))
+ })
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ req, err := http.NewRequest("POST", srv.URL+"/test?x=1", nil)
+ require.NoError(t, err)
+ req.AddCookie(&http.Cookie{Name: "cookie", Value: "value"})
+ resp, err := srv.Client().Do(req)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ require.Len(t, mt.FinishedSpans(), 1)
+
+ tags := mt.FinishedSpans()[0].Tags()
+
+ require.Contains(t, tags, "_dd.appsec.fp.http.header")
+ require.Contains(t, tags, "_dd.appsec.fp.http.endpoint")
+ require.Contains(t, tags, "_dd.appsec.fp.http.network")
+ require.Contains(t, tags, "_dd.appsec.fp.session")
+
+ require.Regexp(t, `^hdr-`, tags["_dd.appsec.fp.http.header"])
+ require.Regexp(t, `^http-`, tags["_dd.appsec.fp.http.endpoint"])
+ require.Regexp(t, `^ssn-`, tags["_dd.appsec.fp.session"])
+ require.Regexp(t, `^net-`, tags["_dd.appsec.fp.http.network"])
+}
+
func init() {
// This permits running the tests locally without defining the env var manually
// We do this because the default go-libddwaf timeout value is too small and makes the tests timeout for no reason
diff --git a/internal/appsec/waf_unit_test.go b/internal/appsec/waf_unit_test.go
index 164f249864..ce2af140a6 100644
--- a/internal/appsec/waf_unit_test.go
+++ b/internal/appsec/waf_unit_test.go
@@ -7,13 +7,14 @@ package appsec
import (
"encoding/json"
+ "testing"
+
internal "github.com/DataDog/appsec-internal-go/appsec"
waf "github.com/DataDog/go-libddwaf/v3"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/listener/httpsec"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/trace"
- "testing"
"github.com/stretchr/testify/require"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/appsec/emitter/waf/addresses"
)
func TestAPISecuritySchemaCollection(t *testing.T) {
@@ -112,7 +113,7 @@ func TestAPISecuritySchemaCollection(t *testing.T) {
{
name: "headers",
addresses: map[string]any{
- httpsec.ServerRequestHeadersNoCookiesAddr: map[string][]string{
+ addresses.ServerRequestHeadersNoCookiesAddr: map[string][]string{
"my-header": {"is-beautiful"},
},
},
@@ -123,7 +124,7 @@ func TestAPISecuritySchemaCollection(t *testing.T) {
{
name: "path-params",
addresses: map[string]any{
- httpsec.ServerRequestPathParamsAddr: map[string]string{
+ addresses.ServerRequestPathParamsAddr: map[string]string{
"my-path-param": "is-beautiful",
},
},
@@ -134,7 +135,7 @@ func TestAPISecuritySchemaCollection(t *testing.T) {
{
name: "query",
addresses: map[string]any{
- httpsec.ServerRequestQueryAddr: map[string][]string{"my-query": {"is-beautiful"}, "my-query-2": {"so-pretty"}},
+ addresses.ServerRequestQueryAddr: map[string][]string{"my-query": {"is-beautiful"}, "my-query-2": {"so-pretty"}},
},
tags: map[string]string{
"_dd.appsec.s.req.query": `[{"my-query":[[[8]],{"len":1}],"my-query-2":[[[8]],{"len":1}]}]`,
@@ -143,13 +144,13 @@ func TestAPISecuritySchemaCollection(t *testing.T) {
{
name: "combined",
addresses: map[string]any{
- httpsec.ServerRequestHeadersNoCookiesAddr: map[string][]string{
+ addresses.ServerRequestHeadersNoCookiesAddr: map[string][]string{
"my-header": {"is-beautiful"},
},
- httpsec.ServerRequestPathParamsAddr: map[string]string{
+ addresses.ServerRequestPathParamsAddr: map[string]string{
"my-path-param": "is-beautiful",
},
- httpsec.ServerRequestQueryAddr: map[string][]string{"my-query": {"is-beautiful"}, "my-query-2": {"so-pretty"}},
+ addresses.ServerRequestQueryAddr: map[string][]string{"my-query": {"is-beautiful"}, "my-query-2": {"so-pretty"}},
},
tags: map[string]string{
"_dd.appsec.s.req.headers": `[{"my-header":[[[8]],{"len":1}]}]`,
@@ -175,13 +176,10 @@ func TestAPISecuritySchemaCollection(t *testing.T) {
wafRes, err := wafCtx.Run(runData)
require.NoError(t, err)
require.True(t, wafRes.HasDerivatives())
- tagsHolder := trace.NewTagsHolder()
for k, v := range wafRes.Derivatives {
- tagsHolder.AddSerializableTag(k, v)
- }
-
- for tag, val := range tagsHolder.Tags() {
- require.Equal(t, tc.tags[tag], val)
+ res, err := json.Marshal(v)
+ require.NoError(t, err)
+ require.Equal(t, tc.tags[k], string(res))
}
})
}
diff --git a/internal/civisibility/constants/env.go b/internal/civisibility/constants/env.go
index ebc00fcb69..66e6de7636 100644
--- a/internal/civisibility/constants/env.go
+++ b/internal/civisibility/constants/env.go
@@ -24,4 +24,17 @@ const (
// This environment variable should be set to your Datadog API key, allowing the agentless mode to authenticate and
// send data directly to the Datadog platform.
APIKeyEnvironmentVariable = "DD_API_KEY"
+
+ // CIVisibilityTestSessionNameEnvironmentVariable indicate the test session name to be used on CI Visibility payloads
+ CIVisibilityTestSessionNameEnvironmentVariable = "DD_TEST_SESSION_NAME"
+
+ // CIVisibilityFlakyRetryEnabledEnvironmentVariable kill-switch that allows to explicitly disable retries even if the remote setting is enabled.
+ // This environment variable should be set to "0" or "false" to disable the flaky retry feature.
+ CIVisibilityFlakyRetryEnabledEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED"
+
+ // CIVisibilityFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for a single test case.
+ CIVisibilityFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_FLAKY_RETRY_COUNT"
+
+ // CIVisibilityTotalFlakyRetryCountEnvironmentVariable indicates the maximum number of retry attempts for the entire session.
+ CIVisibilityTotalFlakyRetryCountEnvironmentVariable = "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT"
)
diff --git a/internal/civisibility/constants/git.go b/internal/civisibility/constants/git.go
index 6265be391a..a373132950 100644
--- a/internal/civisibility/constants/git.go
+++ b/internal/civisibility/constants/git.go
@@ -49,4 +49,13 @@ const (
// GitTag indicates the current git tag.
// This constant is used to tag traces with the tag name associated with the current commit.
GitTag = "git.tag"
+
+ // GitHeadCommit indicates the GIT head commit hash.
+ GitHeadCommit = "git.commit.head_sha"
+
+ // GitPrBaseCommit indicates the GIT PR base commit hash.
+ GitPrBaseCommit = "git.pull_request.base_branch_sha"
+
+ // GitPrBaseBranch indicates the GIT PR base branch name.
+ GitPrBaseBranch = "git.pull_request.base_branch"
)
diff --git a/internal/civisibility/constants/tags.go b/internal/civisibility/constants/tags.go
index 4563cb8839..0a6d690835 100644
--- a/internal/civisibility/constants/tags.go
+++ b/internal/civisibility/constants/tags.go
@@ -10,6 +10,10 @@ const (
// This tag helps in identifying the source of the trace data.
Origin = "_dd.origin"
+ // LogicalCPUCores is a tag used to indicate the number of logical cpu cores
+ // This tag is used by the backend to perform calculations
+ LogicalCPUCores = "_dd.host.vcpu_count"
+
// CIAppTestOrigin defines the CIApp test origin value.
// This constant is used to tag traces that originate from CIApp test executions.
CIAppTestOrigin = "ciapp-test"
diff --git a/internal/civisibility/constants/test_tags.go b/internal/civisibility/constants/test_tags.go
index 3a621a20be..7248d82450 100644
--- a/internal/civisibility/constants/test_tags.go
+++ b/internal/civisibility/constants/test_tags.go
@@ -46,6 +46,10 @@ const (
// This constant is used to tag traces with the line number in the source file where the test starts.
TestSourceStartLine = "test.source.start"
+ // TestSourceEndLine indicates the line of the source file where the test ends.
+ // This constant is used to tag traces with the line number in the source file where the test ends.
+ TestSourceEndLine = "test.source.end"
+
// TestCodeOwners indicates the test code owners.
// This constant is used to tag traces with the code owners responsible for the test.
TestCodeOwners = "test.codeowners"
@@ -61,6 +65,21 @@ const (
// TestCommandWorkingDirectory indicates the test command working directory relative to the source root.
// This constant is used to tag traces with the working directory path relative to the source root.
TestCommandWorkingDirectory = "test.working_directory"
+
+ // TestSessionName indicates the test session name
+ // This constant is used to tag traces with the test session name
+ TestSessionName = "test_session.name"
+
+ // TestIsNew indicates a new test
+ // This constant is used to tag test events that are detected as new by early flake detection
+ TestIsNew = "test.is_new"
+
+ // TestIsRetry indicates a retry execution
+ // This constant is used to tag test events that are part of a retry execution
+ TestIsRetry = "test.is_retry"
+
+ // TestEarlyFlakeDetectionRetryAborted indicates a retry abort reason by the early flake detection feature
+ TestEarlyFlakeDetectionRetryAborted = "test.early_flake.abort_reason"
)
// Define valid test status types.
diff --git a/internal/civisibility/integrations/civisibility.go b/internal/civisibility/integrations/civisibility.go
index aaa92c04bb..aca1d2e5ee 100644
--- a/internal/civisibility/integrations/civisibility.go
+++ b/internal/civisibility/integrations/civisibility.go
@@ -15,8 +15,10 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
// ciVisibilityCloseAction defines an action to be executed when CI visibility is closing.
@@ -55,6 +57,14 @@ func InitializeCIVisibilityMock() mocktracer.Tracer {
func internalCiVisibilityInitialization(tracerInitializer func([]tracer.StartOption)) {
ciVisibilityInitializationOnce.Do(func() {
+ // check the debug flag to enable debug logs. The tracer initialization happens
+ // after the CI Visibility initialization so we need to handle this flag ourselves
+ if internal.BoolEnv("DD_TRACE_DEBUG", false) {
+ log.SetLevel(log.LevelDebug)
+ }
+
+ log.Debug("civisibility: initializing")
+
// Since calling this method indicates we are in CI Visibility mode, set the environment variable.
_ = os.Setenv(constants.CIVisibilityEnabledEnvironmentVariable, "1")
@@ -66,22 +76,29 @@ func internalCiVisibilityInitialization(tracerInitializer func([]tracer.StartOpt
// Preload all CI, Git, and CodeOwners tags.
ciTags := utils.GetCITags()
+ _ = utils.GetCIMetrics()
// Check if DD_SERVICE has been set; otherwise default to the repo name (from the spec).
var opts []tracer.StartOption
- if v := os.Getenv("DD_SERVICE"); v == "" {
+ serviceName := os.Getenv("DD_SERVICE")
+ if serviceName == "" {
if repoURL, ok := ciTags[constants.GitRepositoryURL]; ok {
// regex to sanitize the repository url to be used as a service name
- repoRegex := regexp.MustCompile(`(?m)/([a-zA-Z0-9\\\-_.]*)$`)
+ repoRegex := regexp.MustCompile(`(?m)/([a-zA-Z0-9\-_.]*)$`)
matches := repoRegex.FindStringSubmatch(repoURL)
if len(matches) > 1 {
repoURL = strings.TrimSuffix(matches[1], ".git")
}
- opts = append(opts, tracer.WithService(repoURL))
+ serviceName = repoURL
+ opts = append(opts, tracer.WithService(serviceName))
}
}
+ // Initializing additional features asynchronously
+ go func() { ensureAdditionalFeaturesInitialization(serviceName) }()
+
// Initialize the tracer
+ log.Debug("civisibility: initializing tracer")
tracerInitializer(opts)
// Handle SIGINT and SIGTERM signals to ensure we close all open spans and flush the tracer before exiting
@@ -104,13 +121,16 @@ func PushCiVisibilityCloseAction(action ciVisibilityCloseAction) {
// ExitCiVisibility executes all registered close actions and stops the tracer.
func ExitCiVisibility() {
+ log.Debug("civisibility: exiting")
closeActionsMutex.Lock()
defer closeActionsMutex.Unlock()
defer func() {
closeActions = []ciVisibilityCloseAction{}
+ log.Debug("civisibility: flushing and stopping tracer")
tracer.Flush()
tracer.Stop()
+ log.Debug("civisibility: done.")
}()
for _, v := range closeActions {
v()
diff --git a/internal/civisibility/integrations/civisibility_features.go b/internal/civisibility/integrations/civisibility_features.go
new file mode 100644
index 0000000000..31f1bff760
--- /dev/null
+++ b/internal/civisibility/integrations/civisibility_features.go
@@ -0,0 +1,275 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package integrations
+
+import (
+ "fmt"
+ "os"
+ "slices"
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+const (
+ DefaultFlakyRetryCount = 5
+ DefaultFlakyTotalRetryCount = 1_000
+)
+
+type (
+ // FlakyRetriesSetting struct to hold all the settings related to flaky tests retries
+ FlakyRetriesSetting struct {
+ RetryCount int64
+ TotalRetryCount int64
+ RemainingTotalRetryCount int64
+ }
+
+ searchCommitsResponse struct {
+ LocalCommits []string
+ RemoteCommits []string
+ IsOk bool
+ }
+)
+
+var (
+ // additionalFeaturesInitializationOnce ensures we do the additional features initialization just once
+ additionalFeaturesInitializationOnce sync.Once
+
+ // ciVisibilityRapidClient contains the http rapid client to do CI Visibility queries and upload to the rapid backend
+ ciVisibilityClient net.Client
+
+ // ciVisibilitySettings contains the CI Visibility settings for this session
+ ciVisibilitySettings net.SettingsResponseData
+
+ // ciVisibilityEarlyFlakyDetectionSettings contains the CI Visibility Early Flake Detection data for this session
+ ciVisibilityEarlyFlakyDetectionSettings net.EfdResponseData
+
+ // ciVisibilityFlakyRetriesSettings contains the CI Visibility Flaky Retries settings for this session
+ ciVisibilityFlakyRetriesSettings FlakyRetriesSetting
+)
+
+// ensureAdditionalFeaturesInitialization initialize all the additional features
+func ensureAdditionalFeaturesInitialization(serviceName string) {
+ additionalFeaturesInitializationOnce.Do(func() {
+ log.Debug("civisibility: initializing additional features")
+
+ // Create the CI Visibility client
+ ciVisibilityClient = net.NewClientWithServiceName(serviceName)
+ if ciVisibilityClient == nil {
+ log.Error("civisibility: error getting the ci visibility http client")
+ return
+ }
+
+ // upload the repository changes
+ var uploadChannel = make(chan struct{})
+ go func() {
+ bytes, err := uploadRepositoryChanges()
+ if err != nil {
+ log.Error("civisibility: error uploading repository changes: %v", err)
+ } else {
+ log.Debug("civisibility: uploaded %v bytes in pack files", bytes)
+ }
+ uploadChannel <- struct{}{}
+ }()
+
+ // Get the CI Visibility settings payload for this test session
+ ciSettings, err := ciVisibilityClient.GetSettings()
+ if err != nil {
+ log.Error("civisibility: error getting CI visibility settings: %v", err)
+ } else if ciSettings != nil {
+ ciVisibilitySettings = *ciSettings
+ }
+
+ // check if we need to wait for the upload to finish and repeat the settings request or we can just continue
+ if ciVisibilitySettings.RequireGit {
+ log.Debug("civisibility: waiting for the git upload to finish and repeating the settings request")
+ <-uploadChannel
+ ciSettings, err = ciVisibilityClient.GetSettings()
+ if err != nil {
+ log.Error("civisibility: error getting CI visibility settings: %v", err)
+ } else if ciSettings != nil {
+ ciVisibilitySettings = *ciSettings
+ }
+ } else {
+ log.Debug("civisibility: no need to wait for the git upload to finish")
+ // Enqueue a close action to wait for the upload to finish before finishing the process
+ PushCiVisibilityCloseAction(func() {
+ <-uploadChannel
+ })
+ }
+
+ // if early flake detection is enabled then we run the early flake detection request
+ if ciVisibilitySettings.EarlyFlakeDetection.Enabled {
+ ciEfdData, err := ciVisibilityClient.GetEarlyFlakeDetectionData()
+ if err != nil {
+ log.Error("civisibility: error getting CI visibility early flake detection data: %v", err)
+ } else if ciEfdData != nil {
+ ciVisibilityEarlyFlakyDetectionSettings = *ciEfdData
+ log.Debug("civisibility: early flake detection data loaded.")
+ }
+ }
+
+ // if flaky test retries is enabled then let's load the flaky retries settings
+ if ciVisibilitySettings.FlakyTestRetriesEnabled {
+ flakyRetryEnabledByEnv := internal.BoolEnv(constants.CIVisibilityFlakyRetryEnabledEnvironmentVariable, true)
+ if flakyRetryEnabledByEnv {
+ totalRetriesCount := (int64)(internal.IntEnv(constants.CIVisibilityTotalFlakyRetryCountEnvironmentVariable, DefaultFlakyTotalRetryCount))
+ retryCount := (int64)(internal.IntEnv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, DefaultFlakyRetryCount))
+ ciVisibilityFlakyRetriesSettings = FlakyRetriesSetting{
+ RetryCount: retryCount,
+ TotalRetryCount: totalRetriesCount,
+ RemainingTotalRetryCount: totalRetriesCount,
+ }
+ log.Debug("civisibility: automatic test retries enabled [retryCount: %v, totalRetryCount: %v]", retryCount, totalRetriesCount)
+ } else {
+ log.Warn("civisibility: flaky test retries was disabled by the environment variable")
+ ciVisibilitySettings.FlakyTestRetriesEnabled = false
+ }
+ }
+ })
+}
+
+// GetSettings gets the settings from the backend settings endpoint
+func GetSettings() *net.SettingsResponseData {
+ // call to ensure the additional features initialization is completed (service name can be null here)
+ ensureAdditionalFeaturesInitialization("")
+ return &ciVisibilitySettings
+}
+
+// GetEarlyFlakeDetectionSettings gets the early flake detection known tests data
+func GetEarlyFlakeDetectionSettings() *net.EfdResponseData {
+ // call to ensure the additional features initialization is completed (service name can be null here)
+ ensureAdditionalFeaturesInitialization("")
+ return &ciVisibilityEarlyFlakyDetectionSettings
+}
+
+// GetFlakyRetriesSettings gets the flaky retries settings
+func GetFlakyRetriesSettings() *FlakyRetriesSetting {
+ // call to ensure the additional features initialization is completed (service name can be null here)
+ ensureAdditionalFeaturesInitialization("")
+ return &ciVisibilityFlakyRetriesSettings
+}
+
+func uploadRepositoryChanges() (bytes int64, err error) {
+ // get the search commits response
+ initialCommitData, err := getSearchCommits()
+ if err != nil {
+ return 0, fmt.Errorf("civisibility: error getting the search commits response: %s", err.Error())
+ }
+
+ // let's check if we could retrieve commit data
+ if !initialCommitData.IsOk {
+ return 0, nil
+ }
+
+ // if there are no commits then we don't need to do anything
+ if !initialCommitData.hasCommits() {
+ log.Debug("civisibility: no commits found")
+ return 0, nil
+ }
+
+ // If:
+ // - we have local commits
+ // - there are no missing commits (backend has the total number of local commits already)
+ // then we are good to go with it, we don't need to check if we need to unshallow or anything and just go with that.
+ if initialCommitData.hasCommits() && len(initialCommitData.missingCommits()) == 0 {
+ log.Debug("civisibility: initial commit data has everything already, we don't need to upload anything")
+ return 0, nil
+ }
+
+ // there's some missing commits on the backend, first we need to check if we need to unshallow before sending anything...
+ hasBeenUnshallowed, err := utils.UnshallowGitRepository()
+ if err != nil || !hasBeenUnshallowed {
+ if err != nil {
+ log.Warn(err.Error())
+ }
+ // if unshallowing the repository failed or if there's nothing to unshallow then we try to upload the packfiles from
+ // the initial commit data
+
+ // send the pack file with the missing commits
+ return sendObjectsPackFile(initialCommitData.LocalCommits[0], initialCommitData.missingCommits(), initialCommitData.RemoteCommits)
+ }
+
+ // after unshallowing the repository we need to get the search commits to calculate the missing commits again
+ commitsData, err := getSearchCommits()
+ if err != nil {
+ return 0, fmt.Errorf("civisibility: error getting the search commits response: %s", err.Error())
+ }
+
+ // let's check if we could retrieve commit data (use the refreshed post-unshallow data, not the initial one)
+ if !commitsData.IsOk {
+ return 0, nil
+ }
+
+ // send the pack file with the missing commits
+ return sendObjectsPackFile(commitsData.LocalCommits[0], commitsData.missingCommits(), commitsData.RemoteCommits)
+}
+
+// getSearchCommits gets the search commits response with the local and remote commits
+func getSearchCommits() (*searchCommitsResponse, error) {
+ localCommits := utils.GetLastLocalGitCommitShas()
+ if len(localCommits) == 0 {
+ log.Debug("civisibility: no local commits found")
+ return newSearchCommitsResponse(nil, nil, false), nil
+ }
+
+ log.Debug("civisibility: local commits found: %d", len(localCommits))
+ remoteCommits, err := ciVisibilityClient.GetCommits(localCommits)
+ return newSearchCommitsResponse(localCommits, remoteCommits, true), err
+}
+
+// newSearchCommitsResponse creates a new search commits response
+func newSearchCommitsResponse(localCommits []string, remoteCommits []string, isOk bool) *searchCommitsResponse {
+ return &searchCommitsResponse{
+ LocalCommits: localCommits,
+ RemoteCommits: remoteCommits,
+ IsOk: isOk,
+ }
+}
+
+// hasCommits returns true if the search commits response has commits
+func (r *searchCommitsResponse) hasCommits() bool {
+ return len(r.LocalCommits) > 0
+}
+
+// missingCommits returns the missing commits between the local and remote commits
+func (r *searchCommitsResponse) missingCommits() []string {
+ var missingCommits []string
+ for _, localCommit := range r.LocalCommits {
+ if !slices.Contains(r.RemoteCommits, localCommit) {
+ missingCommits = append(missingCommits, localCommit)
+ }
+ }
+
+ return missingCommits
+}
+
+func sendObjectsPackFile(commitSha string, commitsToInclude []string, commitsToExclude []string) (bytes int64, err error) {
+ // get the pack files to send
+ packFiles := utils.CreatePackFiles(commitsToInclude, commitsToExclude)
+ if len(packFiles) == 0 {
+ log.Debug("civisibility: no pack files to send")
+ return 0, nil
+ }
+
+ // send the pack files
+ log.Debug("civisibility: sending pack file with missing commits. files: %v", packFiles)
+
+ // try to remove the pack files after sending them
+ defer func(files []string) {
+ // best effort to remove the pack files after sending
+ for _, file := range files {
+ _ = os.Remove(file)
+ }
+ }(packFiles)
+
+ // send the pack files
+ return ciVisibilityClient.SendPackFiles(commitSha, packFiles)
+}
diff --git a/internal/civisibility/integrations/gotesting/instrumentation.go b/internal/civisibility/integrations/gotesting/instrumentation.go
new file mode 100644
index 0000000000..ba934dce7b
--- /dev/null
+++ b/internal/civisibility/integrations/gotesting/instrumentation.go
@@ -0,0 +1,459 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package gotesting
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+ "unsafe"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net"
+)
+
+type (
+ // instrumentationMetadata contains the internal instrumentation metadata
+ instrumentationMetadata struct {
+ IsInternal bool
+ }
+
+ // testExecutionMetadata contains metadata regarding a unique *testing.T or *testing.B execution
+ testExecutionMetadata struct {
+ test integrations.DdTest // internal CI Visibility test event
+ error atomic.Int32 // flag to check if the test event has error data already
+ skipped atomic.Int32 // flag to check if the test event has skipped data already
+ panicData any // panic data recovered from an internal test execution when using an additional feature wrapper
+ panicStacktrace string // stacktrace from the panic recovered from an internal test
+ isARetry bool // flag to tag if a current test execution is a retry
+ isANewTest bool // flag to tag if a current test execution is part of a new test (EFD not known test)
+ hasAdditionalFeatureWrapper bool // flag to check if the current execution is part of an additional feature wrapper
+ }
+
+ // runTestWithRetryOptions contains the options for calling runTestWithRetry function
+ runTestWithRetryOptions struct {
+ targetFunc func(t *testing.T) // target function to retry
+ t *testing.T // test to be executed
+ initialRetryCount int64 // initial retry count
+ adjustRetryCount func(duration time.Duration) int64 // adjust retry count function depending on the duration of the first execution
+ shouldRetry func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool // function to decide whether we want to perform a retry
+ perExecution func(ptrToLocalT *testing.T, executionIndex int, duration time.Duration) // function to run after each test execution
+ onRetryEnd func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) // function executed when all execution have finished
+ execMetaAdjust func(execMeta *testExecutionMetadata, executionIndex int) // function to modify the execution metadata for each execution
+ }
+)
+
+var (
+ // ciVisibilityEnabledValue holds a value to check if ci visibility is enabled or not (1 = enabled / 0 = disabled)
+ ciVisibilityEnabledValue int32 = -1
+
+ // instrumentationMap holds a map of *runtime.Func for tracking instrumented functions
+ instrumentationMap = map[*runtime.Func]*instrumentationMetadata{}
+
+ // instrumentationMapMutex is a read-write mutex for synchronizing access to instrumentationMap.
+ instrumentationMapMutex sync.RWMutex
+
+ // ciVisibilityTests holds a map of *testing.T or *testing.B to execution metadata for tracking tests.
+ ciVisibilityTestMetadata = map[unsafe.Pointer]*testExecutionMetadata{}
+
+ // ciVisibilityTestMetadataMutex is a read-write mutex for synchronizing access to ciVisibilityTestMetadata.
+ ciVisibilityTestMetadataMutex sync.RWMutex
+)
+
+// isCiVisibilityEnabled gets if CI Visibility has been enabled or disabled by the "DD_CIVISIBILITY_ENABLED" environment variable
+func isCiVisibilityEnabled() bool {
+ // let's check if the value has already been loaded from the env-vars
+ enabledValue := atomic.LoadInt32(&ciVisibilityEnabledValue)
+ if enabledValue == -1 {
+ // Get the DD_CIVISIBILITY_ENABLED env var, if not present we default to false (for now). This is because if we are here, it means
+ // that the process was instrumented for ci visibility or by using orchestrion.
+ // So effectively this env-var will act as a kill switch for cases where the code is instrumented, but
+ // we don't want the civisibility instrumentation to be enabled.
+ // *** For preview releases we will default to false, meaning that the use of ci visibility must be opt-in ***
+ if internal.BoolEnv(constants.CIVisibilityEnabledEnvironmentVariable, false) {
+ atomic.StoreInt32(&ciVisibilityEnabledValue, 1)
+ return true
+ } else {
+ atomic.StoreInt32(&ciVisibilityEnabledValue, 0)
+ return false
+ }
+ }
+
+ return enabledValue == 1
+}
+
+// getInstrumentationMetadata gets the stored instrumentation metadata for a given *runtime.Func.
+func getInstrumentationMetadata(fn *runtime.Func) *instrumentationMetadata {
+ instrumentationMapMutex.RLock()
+ defer instrumentationMapMutex.RUnlock()
+ if v, ok := instrumentationMap[fn]; ok {
+ return v
+ }
+ return nil
+}
+
+ // setInstrumentationMetadata stores an instrumentation metadata for a given *runtime.Func.
+ func setInstrumentationMetadata(fn *runtime.Func, metadata *instrumentationMetadata) {
+ instrumentationMapMutex.Lock()
+ defer instrumentationMapMutex.Unlock()
+ instrumentationMap[fn] = metadata
+ }
+
+ // createTestMetadata creates the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common
+ func createTestMetadata(tb testing.TB) *testExecutionMetadata {
+ ciVisibilityTestMetadataMutex.Lock()
+ defer ciVisibilityTestMetadataMutex.Unlock()
+ execMetadata := &testExecutionMetadata{}
+ ciVisibilityTestMetadata[reflect.ValueOf(tb).UnsafePointer()] = execMetadata
+ return execMetadata
+ }
+
+// getTestMetadata retrieves the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common
+func getTestMetadata(tb testing.TB) *testExecutionMetadata {
+ return getTestMetadataFromPointer(reflect.ValueOf(tb).UnsafePointer())
+}
+
+// getTestMetadataFromPointer retrieves the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common using a pointer
+func getTestMetadataFromPointer(ptr unsafe.Pointer) *testExecutionMetadata {
+ ciVisibilityTestMetadataMutex.RLock()
+ defer ciVisibilityTestMetadataMutex.RUnlock()
+ if v, ok := ciVisibilityTestMetadata[ptr]; ok {
+ return v
+ }
+ return nil
+}
+
+ // deleteTestMetadata delete the CI visibility test metadata associated with a given *testing.T, *testing.B, *testing.common
+ func deleteTestMetadata(tb testing.TB) {
+ ciVisibilityTestMetadataMutex.Lock()
+ defer ciVisibilityTestMetadataMutex.Unlock()
+ delete(ciVisibilityTestMetadata, reflect.ValueOf(tb).UnsafePointer())
+ }
+
+// checkIfCIVisibilityExitIsRequiredByPanic checks the additional features settings to decide if we allow individual tests to panic or not
+func checkIfCIVisibilityExitIsRequiredByPanic() bool {
+ // Apply additional features
+ settings := integrations.GetSettings()
+
+ // If we don't plan to do retries then we allow to panic
+ return !settings.FlakyTestRetriesEnabled && !settings.EarlyFlakeDetection.Enabled
+}
+
+// applyAdditionalFeaturesToTestFunc applies all the additional features as wrapper of a func(*testing.T)
+func applyAdditionalFeaturesToTestFunc(f func(*testing.T), testInfo *commonInfo) func(*testing.T) {
+ // Apply additional features
+ settings := integrations.GetSettings()
+
+ // Check if we have something to do, if not we bail out
+ if !settings.FlakyTestRetriesEnabled && !settings.EarlyFlakeDetection.Enabled {
+ return f
+ }
+
+ // Target function
+ targetFunc := f
+
+ // Flaky test retries
+ if settings.FlakyTestRetriesEnabled {
+ targetFunc = applyFlakyTestRetriesAdditionalFeature(targetFunc)
+ }
+
+ // Early flake detection
+ if settings.EarlyFlakeDetection.Enabled {
+ targetFunc = applyEarlyFlakeDetectionAdditionalFeature(testInfo, targetFunc, settings)
+ }
+
+ // Register the instrumented func as an internal instrumented func (to avoid double instrumentation)
+ setInstrumentationMetadata(runtime.FuncForPC(reflect.ValueOf(targetFunc).Pointer()), &instrumentationMetadata{IsInternal: true})
+ return targetFunc
+}
+
+// applyFlakyTestRetriesAdditionalFeature applies the flaky test retries feature as a wrapper of a func(*testing.T)
+func applyFlakyTestRetriesAdditionalFeature(targetFunc func(*testing.T)) func(*testing.T) {
+ flakyRetrySettings := integrations.GetFlakyRetriesSettings()
+
+ // If the retry count per test is > 1 and if we still have remaining total retry count
+ if flakyRetrySettings.RetryCount > 1 && flakyRetrySettings.RemainingTotalRetryCount > 0 {
+ return func(t *testing.T) {
+ runTestWithRetry(&runTestWithRetryOptions{
+ targetFunc: targetFunc,
+ t: t,
+ initialRetryCount: flakyRetrySettings.RetryCount,
+ adjustRetryCount: nil, // No adjustRetryCount
+ shouldRetry: func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool {
+ remainingTotalRetries := atomic.AddInt64(&flakyRetrySettings.RemainingTotalRetryCount, -1)
+ // Decide whether to retry
+ return ptrToLocalT.Failed() && remainingRetries >= 0 && remainingTotalRetries >= 0
+ },
+ perExecution: nil, // No perExecution needed
+ onRetryEnd: func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) {
+ // Update original `t` with results from last execution
+ tCommonPrivates := getTestPrivateFields(t)
+ tCommonPrivates.SetFailed(lastPtrToLocalT.Failed())
+ tCommonPrivates.SetSkipped(lastPtrToLocalT.Skipped())
+
+ // Update parent status if failed
+ if lastPtrToLocalT.Failed() {
+ tParentCommonPrivates := getTestParentPrivateFields(t)
+ tParentCommonPrivates.SetFailed(true)
+ }
+
+ // Print summary after retries
+ if executionIndex > 0 {
+ status := "passed"
+ if t.Failed() {
+ status = "failed"
+ } else if t.Skipped() {
+ status = "skipped"
+ }
+
+ fmt.Printf(" [ %v after %v retries by Datadog's auto test retries ]\n", status, executionIndex)
+ }
+
+ // Check if total retry count was exceeded
+ if flakyRetrySettings.RemainingTotalRetryCount < 1 {
+ fmt.Println(" the maximum number of total retries was exceeded.")
+ }
+ },
+ execMetaAdjust: nil, // No execMetaAdjust needed
+ })
+ }
+ }
+ return targetFunc
+}
+
+// applyEarlyFlakeDetectionAdditionalFeature applies the early flake detection feature as a wrapper of a func(*testing.T)
+func applyEarlyFlakeDetectionAdditionalFeature(testInfo *commonInfo, targetFunc func(*testing.T), settings *net.SettingsResponseData) func(*testing.T) {
+ earlyFlakeDetectionData := integrations.GetEarlyFlakeDetectionSettings()
+ if earlyFlakeDetectionData != nil &&
+ len(earlyFlakeDetectionData.Tests) > 0 {
+
+ // Flag indicating whether this is a known test
+ isAKnownTest := false
+
+ // Check if the test is a known test or a new one
+ if knownSuites, ok := earlyFlakeDetectionData.Tests[testInfo.moduleName]; ok {
+ if knownTests, ok := knownSuites[testInfo.suiteName]; ok {
+ if slices.Contains(knownTests, testInfo.testName) {
+ isAKnownTest = true
+ }
+ }
+ }
+
+ // If it's a new test, then we apply the EFD wrapper
+ if !isAKnownTest {
+ return func(t *testing.T) {
+ var testPassCount, testSkipCount, testFailCount int
+
+ runTestWithRetry(&runTestWithRetryOptions{
+ targetFunc: targetFunc,
+ t: t,
+ initialRetryCount: 0,
+ adjustRetryCount: func(duration time.Duration) int64 {
+ slowTestRetriesSettings := settings.EarlyFlakeDetection.SlowTestRetries
+ durationSecs := duration.Seconds()
+ if durationSecs < 5 {
+ return int64(slowTestRetriesSettings.FiveS)
+ } else if durationSecs < 10 {
+ return int64(slowTestRetriesSettings.TenS)
+ } else if durationSecs < 30 {
+ return int64(slowTestRetriesSettings.ThirtyS)
+ } else if duration.Minutes() < 5 {
+ return int64(slowTestRetriesSettings.FiveM)
+ }
+ return 0
+ },
+ shouldRetry: func(ptrToLocalT *testing.T, executionIndex int, remainingRetries int64) bool {
+ return remainingRetries >= 0
+ },
+ perExecution: func(ptrToLocalT *testing.T, executionIndex int, duration time.Duration) {
+ // Collect test results
+ if ptrToLocalT.Failed() {
+ testFailCount++
+ } else if ptrToLocalT.Skipped() {
+ testSkipCount++
+ } else {
+ testPassCount++
+ }
+ },
+ onRetryEnd: func(t *testing.T, executionIndex int, lastPtrToLocalT *testing.T) {
+ // Update test status based on collected counts
+ tCommonPrivates := getTestPrivateFields(t)
+ tParentCommonPrivates := getTestParentPrivateFields(t)
+ status := "passed"
+ if testPassCount == 0 {
+ if testSkipCount > 0 {
+ status = "skipped"
+ tCommonPrivates.SetSkipped(true)
+ }
+ if testFailCount > 0 {
+ status = "failed"
+ tCommonPrivates.SetFailed(true)
+ tParentCommonPrivates.SetFailed(true)
+ }
+ }
+
+ // Print summary after retries
+ if executionIndex > 0 {
+ fmt.Printf(" [ %v after %v retries by Datadog's early flake detection ]\n", status, executionIndex)
+ }
+ },
+ execMetaAdjust: func(execMeta *testExecutionMetadata, executionIndex int) {
+ // Set the flag new test to true
+ execMeta.isANewTest = true
+ },
+ })
+ }
+ }
+ }
+ return targetFunc
+}
+
+// runTestWithRetry encapsulates the common retry logic for test functions.
+func runTestWithRetry(options *runTestWithRetryOptions) {
+ executionIndex := -1
+ var panicExecution *testExecutionMetadata
+ var lastPtrToLocalT *testing.T
+
+ // Module and suite for this test
+ var module integrations.DdTestModule
+ var suite integrations.DdTestSuite
+
+ // Check if we have execution metadata to propagate
+ originalExecMeta := getTestMetadata(options.t)
+
+ retryCount := options.initialRetryCount
+
+ for {
+ // Clear the matcher subnames map before each execution to avoid subname tests being called "parent/subname#NN" due to retries
+ getTestContextMatcherPrivateFields(options.t).ClearSubNames()
+
+ // Increment execution index
+ executionIndex++
+
+ // Create a new local copy of `t` to isolate execution results
+ ptrToLocalT := &testing.T{}
+ copyTestWithoutParent(options.t, ptrToLocalT)
+
+ // Create a dummy parent so we can run the test using this local copy
+ // without affecting the test parent
+ localTPrivateFields := getTestPrivateFields(ptrToLocalT)
+ *localTPrivateFields.parent = unsafe.Pointer(&testing.T{})
+
+ // Create an execution metadata instance
+ execMeta := createTestMetadata(ptrToLocalT)
+ execMeta.hasAdditionalFeatureWrapper = true
+
+ // Propagate set tags from a parent wrapper
+ if originalExecMeta != nil {
+ if originalExecMeta.isANewTest {
+ execMeta.isANewTest = true
+ }
+ if originalExecMeta.isARetry {
+ execMeta.isARetry = true
+ }
+ }
+
+ // If we are in a retry execution, set the `isARetry` flag
+ if executionIndex > 0 {
+ execMeta.isARetry = true
+ }
+
+ // Adjust execution metadata
+ if options.execMetaAdjust != nil {
+ options.execMetaAdjust(execMeta, executionIndex)
+ }
+
+ // Run original func similar to how it gets run internally in tRunner
+ startTime := time.Now()
+ chn := make(chan struct{}, 1)
+ go func() {
+ defer func() {
+ chn <- struct{}{}
+ }()
+ options.targetFunc(ptrToLocalT)
+ }()
+ <-chn
+ duration := time.Since(startTime)
+
+ // Call cleanup functions after this execution
+ if err := testingTRunCleanup(ptrToLocalT, 1); err != nil {
+ fmt.Printf("cleanup error: %v\n", err)
+ }
+
+ // Copy the current test to the wrapper if necessary
+ if originalExecMeta != nil {
+ originalExecMeta.test = execMeta.test
+ }
+
+ // Extract module and suite if present
+ currentSuite := execMeta.test.Suite()
+ if suite == nil && currentSuite != nil {
+ suite = currentSuite
+ }
+ if module == nil && currentSuite != nil && currentSuite.Module() != nil {
+ module = currentSuite.Module()
+ }
+
+ // Remove execution metadata
+ deleteTestMetadata(ptrToLocalT)
+
+ // Handle panic data
+ if execMeta.panicData != nil {
+ ptrToLocalT.Fail()
+ if panicExecution == nil {
+ panicExecution = execMeta
+ }
+ }
+
+ // Adjust retry count after first execution if necessary
+ if options.adjustRetryCount != nil && executionIndex == 0 {
+ retryCount = options.adjustRetryCount(duration)
+ }
+
+ // Decrement retry count
+ retryCount--
+
+ // Call perExecution function
+ if options.perExecution != nil {
+ options.perExecution(ptrToLocalT, executionIndex, duration)
+ }
+
+ // Update lastPtrToLocalT
+ lastPtrToLocalT = ptrToLocalT
+
+ // Decide whether to continue
+ if !options.shouldRetry(ptrToLocalT, executionIndex, retryCount) {
+ break
+ }
+ }
+
+ // Call onRetryEnd
+ if options.onRetryEnd != nil {
+ options.onRetryEnd(options.t, executionIndex, lastPtrToLocalT)
+ }
+
+ // After all test executions, check if we need to close the suite and the module
+ if originalExecMeta == nil {
+ checkModuleAndSuite(module, suite)
+ }
+
+ // Re-panic if test failed and panic data exists
+ if options.t.Failed() && panicExecution != nil {
+ // Ensure we flush all CI visibility data and close the session event
+ integrations.ExitCiVisibility()
+ panic(fmt.Sprintf("test failed and panicked after %d retries.\n%v\n%v", executionIndex, panicExecution.panicData, panicExecution.panicStacktrace))
+ }
+}
+
+//go:linkname testingTRunCleanup testing.(*common).runCleanup
+func testingTRunCleanup(c *testing.T, ph int) (panicVal any)
diff --git a/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go
new file mode 100644
index 0000000000..2ae31b8e30
--- /dev/null
+++ b/internal/civisibility/integrations/gotesting/instrumentation_orchestrion.go
@@ -0,0 +1,402 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package gotesting
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+ _ "unsafe"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
+)
+
+// ******************************************************************************************************************
+// WARNING: DO NOT CHANGE THE SIGNATURE OF THESE FUNCTIONS!
+//
+// The following functions are being used by both the manual api and most importantly the Orchestrion automatic
+// instrumentation integration.
+// ******************************************************************************************************************
+
+// instrumentTestingM helper function to instrument internalTests and internalBenchmarks in a `*testing.M` instance.
+//
+//go:linkname instrumentTestingM
+func instrumentTestingM(m *testing.M) func(exitCode int) {
+ // Check if CI Visibility was disabled using the kill switch before trying to initialize it
+ atomic.StoreInt32(&ciVisibilityEnabledValue, -1)
+ if !isCiVisibilityEnabled() {
+ return func(exitCode int) {}
+ }
+
+ // Initialize CI Visibility
+ integrations.EnsureCiVisibilityInitialization()
+
+ // Create a new test session for CI visibility.
+ session = integrations.CreateTestSession()
+
+ ddm := (*M)(m)
+
+ // Instrument the internal tests for CI visibility.
+ ddm.instrumentInternalTests(getInternalTestArray(m))
+
+ // Instrument the internal benchmarks for CI visibility.
+ for _, v := range os.Args {
+ // check if benchmarking is enabled to instrument
+ if strings.Contains(v, "-bench") || strings.Contains(v, "test.bench") {
+ ddm.instrumentInternalBenchmarks(getInternalBenchmarkArray(m))
+ break
+ }
+ }
+
+ return func(exitCode int) {
+ // Check for code coverage if enabled.
+ if testing.CoverMode() != "" {
+ coveragePercentage := testing.Coverage() * 100
+ session.SetTag(constants.CodeCoveragePercentageOfTotalLines, coveragePercentage)
+ }
+
+ // Close the session and return the exit code.
+ session.Close(exitCode)
+
+ // Finalize CI Visibility
+ integrations.ExitCiVisibility()
+ }
+}
+
+// instrumentTestingTFunc helper function to instrument a testing function func(*testing.T)
+//
+//go:linkname instrumentTestingTFunc
+func instrumentTestingTFunc(f func(*testing.T)) func(*testing.T) {
+ // Check if CI Visibility was disabled using the kill switch before instrumenting
+ if !isCiVisibilityEnabled() {
+ return f
+ }
+
+ // Reflect the function to obtain its pointer.
+ fReflect := reflect.Indirect(reflect.ValueOf(f))
+ moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer())
+ originalFunc := runtime.FuncForPC(fReflect.Pointer())
+
+ // Avoid instrumenting twice
+ metadata := getInstrumentationMetadata(originalFunc)
+ if metadata != nil && metadata.IsInternal {
+ // If it is an internal test, we don't instrument it because f is already the instrumented func created by executeInternalTest
+ return f
+ }
+
+ instrumentedFn := func(t *testing.T) {
+ // Initialize module counters if not already present.
+ if _, ok := modulesCounters[moduleName]; !ok {
+ var v int32
+ modulesCounters[moduleName] = &v
+ }
+ // Increment the test count in the module.
+ atomic.AddInt32(modulesCounters[moduleName], 1)
+
+ // Initialize suite counters if not already present.
+ if _, ok := suitesCounters[suiteName]; !ok {
+ var v int32
+ suitesCounters[suiteName] = &v
+ }
+ // Increment the test count in the suite.
+ atomic.AddInt32(suitesCounters[suiteName], 1)
+
+ // Create or retrieve the module, suite, and test for CI visibility.
+ module := session.GetOrCreateModuleWithFramework(moduleName, testFramework, runtime.Version())
+ suite := module.GetOrCreateSuite(suiteName)
+ test := suite.CreateTest(t.Name())
+ test.SetTestFunc(originalFunc)
+
+ // Get the metadata regarding the execution (in case it was already created by the additional features)
+ execMeta := getTestMetadata(t)
+ if execMeta == nil {
+ // in case there are no additional features, we create the metadata for this execution and defer its disposal
+ execMeta = createTestMetadata(t)
+ defer deleteTestMetadata(t)
+ }
+
+ // Because this is a subtest let's propagate some execution metadata from the parent test
+ testPrivateFields := getTestPrivateFields(t)
+ if testPrivateFields.parent != nil {
+ parentExecMeta := getTestMetadataFromPointer(*testPrivateFields.parent)
+ if parentExecMeta != nil {
+ if parentExecMeta.isANewTest {
+ execMeta.isANewTest = true
+ }
+ if parentExecMeta.isARetry {
+ execMeta.isARetry = true
+ }
+ }
+ }
+
+ // Set the CI visibility test.
+ execMeta.test = test
+
+ // If the execution is for a new test we tag the test event from early flake detection
+ if execMeta.isANewTest {
+ // Set the is new test tag
+ test.SetTag(constants.TestIsNew, "true")
+ }
+
+ // If the execution is a retry we tag the test event
+ if execMeta.isARetry {
+ // Set the retry tag
+ test.SetTag(constants.TestIsRetry, "true")
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ // Handle panic and set error information.
+ test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1))
+ test.Close(integrations.ResultStatusFail)
+ checkModuleAndSuite(module, suite)
+ // this is not an internal test. Retries are not applied to subtests (because the parent internal test is going to be retried)
+ // so for this case we avoid closing CI Visibility, but we don't stop the panic from happening.
+ // it will be handled by `t.Run`
+ if checkIfCIVisibilityExitIsRequiredByPanic() {
+ integrations.ExitCiVisibility()
+ }
+ panic(r)
+ } else {
+ // Normal finalization: determine the test result based on its state.
+ if t.Failed() {
+ test.SetTag(ext.Error, true)
+ suite.SetTag(ext.Error, true)
+ module.SetTag(ext.Error, true)
+ test.Close(integrations.ResultStatusFail)
+ } else if t.Skipped() {
+ test.Close(integrations.ResultStatusSkip)
+ } else {
+ test.Close(integrations.ResultStatusPass)
+ }
+ checkModuleAndSuite(module, suite)
+ }
+ }()
+
+ // Execute the original test function.
+ f(t)
+ }
+
+ setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFn)).Pointer()), &instrumentationMetadata{IsInternal: true})
+ return instrumentedFn
+}
+
+// instrumentSetErrorInfo helper function to set an error in the `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentSetErrorInfo
+func instrumentSetErrorInfo(tb testing.TB, errType string, errMessage string, skip int) {
+ // Check if CI Visibility was disabled using the kill switch before
+ if !isCiVisibilityEnabled() {
+ return
+ }
+
+ // Get the CI Visibility span and check if we can set the error type, message and stack
+ ciTestItem := getTestMetadata(tb)
+ if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.error.CompareAndSwap(0, 1) {
+ ciTestItem.test.SetErrorInfo(errType, errMessage, utils.GetStacktrace(2+skip))
+ }
+}
+
+// instrumentCloseAndSkip helper function to close and skip with a reason a `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentCloseAndSkip
+func instrumentCloseAndSkip(tb testing.TB, skipReason string) {
+ // Check if CI Visibility was disabled using the kill switch before
+ if !isCiVisibilityEnabled() {
+ return
+ }
+
+ // Get the CI Visibility span and check if we can mark it as skipped and close it
+ ciTestItem := getTestMetadata(tb)
+ if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) {
+ ciTestItem.test.CloseWithFinishTimeAndSkipReason(integrations.ResultStatusSkip, time.Now(), skipReason)
+ }
+}
+
+// instrumentSkipNow helper function to close and skip a `*testing.T, *testing.B, *testing.common` CI Visibility span
+//
+//go:linkname instrumentSkipNow
+func instrumentSkipNow(tb testing.TB) {
+ // Check if CI Visibility was disabled using the kill switch before
+ if !isCiVisibilityEnabled() {
+ return
+ }
+
+ // Get the CI Visibility span and check if we can mark it as skipped and close it
+ ciTestItem := getTestMetadata(tb)
+ if ciTestItem != nil && ciTestItem.test != nil && ciTestItem.skipped.CompareAndSwap(0, 1) {
+ ciTestItem.test.Close(integrations.ResultStatusSkip)
+ }
+}
+
+// instrumentTestingBFunc helper function to instrument a benchmark function func(*testing.B)
+//
+//go:linkname instrumentTestingBFunc
+func instrumentTestingBFunc(pb *testing.B, name string, f func(*testing.B)) (string, func(*testing.B)) {
+ // Check if CI Visibility was disabled using the kill switch before instrumenting
+ if !isCiVisibilityEnabled() {
+ return name, f
+ }
+
+ // Reflect the function to obtain its pointer.
+ fReflect := reflect.Indirect(reflect.ValueOf(f))
+ moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer())
+ originalFunc := runtime.FuncForPC(fReflect.Pointer())
+
+ // Avoid instrumenting twice
+ if hasCiVisibilityBenchmarkFunc(originalFunc) {
+ return name, f
+ }
+
+ instrumentedFunc := func(b *testing.B) {
+ // The sub-benchmark implementation relies on creating a dummy sub benchmark (called [DD:TestVisibility]) with
+ // a Run over the original sub benchmark function to get the child results without interfering measurements
+ // By doing this, the names of the sub-benchmarks are changed
+ // from:
+ // benchmark/child
+ // to:
+ // benchmark/[DD:TestVisibility]/child
+ // We use regex and decrement the depth level of the benchmark to restore the original name
+
+ // Initialize module counters if not already present.
+ if _, ok := modulesCounters[moduleName]; !ok {
+ var v int32
+ modulesCounters[moduleName] = &v
+ }
+ // Increment the test count in the module.
+ atomic.AddInt32(modulesCounters[moduleName], 1)
+
+ // Initialize suite counters if not already present.
+ if _, ok := suitesCounters[suiteName]; !ok {
+ var v int32
+ suitesCounters[suiteName] = &v
+ }
+ // Increment the test count in the suite.
+ atomic.AddInt32(suitesCounters[suiteName], 1)
+
+ // Decrement level.
+ bpf := getBenchmarkPrivateFields(b)
+ bpf.AddLevel(-1)
+
+ startTime := time.Now()
+ module := session.GetOrCreateModuleWithFrameworkAndStartTime(moduleName, testFramework, runtime.Version(), startTime)
+ suite := module.GetOrCreateSuiteWithStartTime(suiteName, startTime)
+ test := suite.CreateTestWithStartTime(fmt.Sprintf("%s/%s", pb.Name(), name), startTime)
+ test.SetTestFunc(originalFunc)
+
+ // Restore the original name without the sub-benchmark auto name.
+ *bpf.name = subBenchmarkAutoNameRegex.ReplaceAllString(*bpf.name, "")
+
+ // Run original benchmark.
+ var iPfOfB *benchmarkPrivateFields
+ var recoverFunc *func(r any)
+ instrumentedFunc := func(b *testing.B) {
+ // Stop the timer to do the initialization and replacements.
+ b.StopTimer()
+
+ defer func() {
+ if r := recover(); r != nil {
+ if recoverFunc != nil {
+ fn := *recoverFunc
+ fn(r)
+ }
+ panic(r)
+ }
+ }()
+
+ // First time we get the private fields of the inner testing.B.
+ iPfOfB = getBenchmarkPrivateFields(b)
+ // Replace this function with the original one (executed only once - the first iteration[b.run1]).
+ *iPfOfB.benchFunc = f
+
+ // Get the metadata regarding the execution (in case it was already created by the additional features)
+ execMeta := getTestMetadata(b)
+ if execMeta == nil {
+ // in case there are no additional features, we create the metadata for this execution and defer its disposal
+ execMeta = createTestMetadata(b)
+ defer deleteTestMetadata(b)
+ }
+
+ // Set the CI visibility test.
+ execMeta.test = test
+
+ // Enable the timer again.
+ b.ResetTimer()
+ b.StartTimer()
+
+ // Execute original func
+ f(b)
+ }
+
+ setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer()))
+ b.Run(name, instrumentedFunc)
+
+ endTime := time.Now()
+ results := iPfOfB.result
+
+ // Set benchmark data for CI visibility.
+ test.SetBenchmarkData("duration", map[string]any{
+ "run": results.N,
+ "mean": results.NsPerOp(),
+ })
+ test.SetBenchmarkData("memory_total_operations", map[string]any{
+ "run": results.N,
+ "mean": results.AllocsPerOp(),
+ "statistics.max": results.MemAllocs,
+ })
+ test.SetBenchmarkData("mean_heap_allocations", map[string]any{
+ "run": results.N,
+ "mean": results.AllocedBytesPerOp(),
+ })
+ test.SetBenchmarkData("total_heap_allocations", map[string]any{
+ "run": results.N,
+ "mean": iPfOfB.result.MemBytes,
+ })
+ if len(results.Extra) > 0 {
+ mapConverted := map[string]any{}
+ for k, v := range results.Extra {
+ mapConverted[k] = v
+ }
+ test.SetBenchmarkData("extra", mapConverted)
+ }
+
+ // Define a function to handle panic during benchmark finalization.
+ panicFunc := func(r any) {
+ test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1))
+ suite.SetTag(ext.Error, true)
+ module.SetTag(ext.Error, true)
+ test.Close(integrations.ResultStatusFail)
+ checkModuleAndSuite(module, suite)
+ integrations.ExitCiVisibility()
+ }
+ recoverFunc = &panicFunc
+
+ // Normal finalization: determine the benchmark result based on its state.
+ if iPfOfB.B.Failed() {
+ test.SetTag(ext.Error, true)
+ suite.SetTag(ext.Error, true)
+ module.SetTag(ext.Error, true)
+ test.CloseWithFinishTime(integrations.ResultStatusFail, endTime)
+ } else if iPfOfB.B.Skipped() {
+ test.CloseWithFinishTime(integrations.ResultStatusSkip, endTime)
+ } else {
+ test.CloseWithFinishTime(integrations.ResultStatusPass, endTime)
+ }
+
+ checkModuleAndSuite(module, suite)
+ }
+ setCiVisibilityBenchmarkFunc(originalFunc)
+ setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer()))
+ return subBenchmarkAutoName, instrumentedFunc
+}
diff --git a/internal/civisibility/integrations/gotesting/reflections.go b/internal/civisibility/integrations/gotesting/reflections.go
index 6aaac3ed4c..745aab1468 100644
--- a/internal/civisibility/integrations/gotesting/reflections.go
+++ b/internal/civisibility/integrations/gotesting/reflections.go
@@ -7,17 +7,25 @@ package gotesting
import (
"errors"
+ "io"
"reflect"
"sync"
+ "sync/atomic"
"testing"
+ "time"
"unsafe"
)
// getFieldPointerFrom gets an unsafe.Pointer (gc-safe type of pointer) to a struct field
// useful to get or set values to private field
func getFieldPointerFrom(value any, fieldName string) (unsafe.Pointer, error) {
- indirectValue := reflect.Indirect(reflect.ValueOf(value))
- member := indirectValue.FieldByName(fieldName)
+ return getFieldPointerFromValue(reflect.Indirect(reflect.ValueOf(value)), fieldName)
+}
+
+// getFieldPointerFromValue gets an unsafe.Pointer (gc-safe type of pointer) to a struct field
+// useful to get or set values to private field
+func getFieldPointerFromValue(value reflect.Value, fieldName string) (unsafe.Pointer, error) {
+ member := value.FieldByName(fieldName)
if member.IsValid() {
return unsafe.Pointer(member.UnsafeAddr()), nil
}
@@ -25,7 +33,61 @@ func getFieldPointerFrom(value any, fieldName string) (unsafe.Pointer, error) {
return unsafe.Pointer(nil), errors.New("member is invalid")
}
+// copyFieldUsingPointers copies a private field value from one struct to another of the same type
+func copyFieldUsingPointers[V any](source any, target any, fieldName string) error {
+ sourcePtr, err := getFieldPointerFrom(source, fieldName)
+ if err != nil {
+ return err
+ }
+ targetPtr, err := getFieldPointerFrom(target, fieldName)
+ if err != nil {
+ return err
+ }
+
+ *(*V)(targetPtr) = *(*V)(sourcePtr)
+ return nil
+}
+
+// ****************
+// COMMON
+// ****************
+
+// commonPrivateFields is collection of required private fields from testing.common
+type commonPrivateFields struct {
+ mu *sync.RWMutex
+ level *int
+ name *string // Name of test or benchmark.
+ failed *bool // Test or benchmark has failed.
+ skipped *bool // Test or benchmark has been skipped.
+ parent *unsafe.Pointer // Parent common
+}
+
+// AddLevel increase or decrease the testing.common.level field value, used by
+// testing.B to create the name of the benchmark test
+func (c *commonPrivateFields) AddLevel(delta int) int {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ *c.level = *c.level + delta
+ return *c.level
+}
+
+// SetFailed set the boolean value in testing.common.failed field value
+func (c *commonPrivateFields) SetFailed(value bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ *c.failed = value
+}
+
+// SetSkipped set the boolean value in testing.common.skipped field value
+func (c *commonPrivateFields) SetSkipped(value bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ *c.skipped = value
+}
+
+// ****************
// TESTING
+// ****************
// getInternalTestArray gets the pointer to the testing.InternalTest array inside a
// testing.M instance containing all the "root" tests
@@ -36,7 +98,147 @@ func getInternalTestArray(m *testing.M) *[]testing.InternalTest {
return nil
}
+// getTestPrivateFields is a method to retrieve all required privates field from
+// testing.T, returning a commonPrivateFields instance
+func getTestPrivateFields(t *testing.T) *commonPrivateFields {
+ testFields := &commonPrivateFields{}
+
+ // testing.common
+ if ptr, err := getFieldPointerFrom(t, "mu"); err == nil {
+ testFields.mu = (*sync.RWMutex)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(t, "level"); err == nil {
+ testFields.level = (*int)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(t, "name"); err == nil {
+ testFields.name = (*string)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(t, "failed"); err == nil {
+ testFields.failed = (*bool)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(t, "skipped"); err == nil {
+ testFields.skipped = (*bool)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(t, "parent"); err == nil {
+ testFields.parent = (*unsafe.Pointer)(ptr)
+ }
+
+ return testFields
+}
+
+// getTestParentPrivateFields is a method to retrieve all required parent privates field from
+// testing.T.parent, returning a commonPrivateFields instance
+func getTestParentPrivateFields(t *testing.T) *commonPrivateFields {
+ indirectValue := reflect.Indirect(reflect.ValueOf(t))
+ member := indirectValue.FieldByName("parent")
+ if member.IsValid() {
+ value := member.Elem()
+ testFields := &commonPrivateFields{}
+
+ // testing.common
+ if ptr, err := getFieldPointerFromValue(value, "mu"); err == nil {
+ testFields.mu = (*sync.RWMutex)(ptr)
+ }
+ if ptr, err := getFieldPointerFromValue(value, "level"); err == nil {
+ testFields.level = (*int)(ptr)
+ }
+ if ptr, err := getFieldPointerFromValue(value, "name"); err == nil {
+ testFields.name = (*string)(ptr)
+ }
+ if ptr, err := getFieldPointerFromValue(value, "failed"); err == nil {
+ testFields.failed = (*bool)(ptr)
+ }
+ if ptr, err := getFieldPointerFromValue(value, "skipped"); err == nil {
+ testFields.skipped = (*bool)(ptr)
+ }
+
+ return testFields
+ }
+ return nil
+}
+
+// contextMatcher is collection of required private fields from testing.context.match
+type contextMatcher struct {
+ mu *sync.RWMutex
+ subNames *map[string]int32
+}
+
+// ClearSubNames clears the subname map used for creating unique names for subtests
+func (c *contextMatcher) ClearSubNames() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ *c.subNames = map[string]int32{}
+}
+
+// getTestContextMatcherPrivateFields is a method to retrieve all required privates field from
+// testing.T.context.match, returning a contextMatcher instance
+func getTestContextMatcherPrivateFields(t *testing.T) *contextMatcher {
+ indirectValue := reflect.Indirect(reflect.ValueOf(t))
+ contextMember := indirectValue.FieldByName("context")
+ if !contextMember.IsValid() {
+ return nil
+ }
+ contextMember = contextMember.Elem()
+ matchMember := contextMember.FieldByName("match")
+ if !matchMember.IsValid() {
+ return nil
+ }
+ matchMember = matchMember.Elem()
+
+ fields := &contextMatcher{}
+ if ptr, err := getFieldPointerFromValue(matchMember, "mu"); err == nil {
+ fields.mu = (*sync.RWMutex)(ptr)
+ }
+ if ptr, err := getFieldPointerFromValue(matchMember, "subNames"); err == nil {
+ fields.subNames = (*map[string]int32)(ptr)
+ }
+
+ return fields
+}
+
+// copyTestWithoutParent tries to copy all private fields except the t.parent from a *testing.T to another
+func copyTestWithoutParent(source *testing.T, target *testing.T) {
+ // Copy important field values
+ _ = copyFieldUsingPointers[[]byte](source, target, "output") // Output generated by test or benchmark.
+ _ = copyFieldUsingPointers[io.Writer](source, target, "w") // For flushToParent.
+ _ = copyFieldUsingPointers[bool](source, target, "ran") // Test or benchmark (or one of its subtests) was executed.
+ _ = copyFieldUsingPointers[bool](source, target, "failed") // Test or benchmark has failed.
+ _ = copyFieldUsingPointers[bool](source, target, "skipped") // Test or benchmark has been skipped.
+ _ = copyFieldUsingPointers[bool](source, target, "done") // Test is finished and all subtests have completed.
+ _ = copyFieldUsingPointers[map[uintptr]struct{}](source, target, "helperPCs") // functions to be skipped when writing file/line info
+ _ = copyFieldUsingPointers[map[string]struct{}](source, target, "helperNames") // helperPCs converted to function names
+ _ = copyFieldUsingPointers[[]func()](source, target, "cleanups") // optional functions to be called at the end of the test
+ _ = copyFieldUsingPointers[string](source, target, "cleanupName") // Name of the cleanup function.
+ _ = copyFieldUsingPointers[[]uintptr](source, target, "cleanupPc") // The stack trace at the point where Cleanup was called.
+ _ = copyFieldUsingPointers[bool](source, target, "finished") // Test function has completed.
+ _ = copyFieldUsingPointers[bool](source, target, "inFuzzFn") // Whether the fuzz target, if this is one, is running.
+
+ _ = copyFieldUsingPointers[unsafe.Pointer](source, target, "chatty") // A copy of chattyPrinter, if the chatty flag is set.
+ _ = copyFieldUsingPointers[bool](source, target, "bench") // Whether the current test is a benchmark.
+ _ = copyFieldUsingPointers[atomic.Bool](source, target, "hasSub") // whether there are sub-benchmarks.
+ _ = copyFieldUsingPointers[atomic.Bool](source, target, "cleanupStarted") // Registered cleanup callbacks have started to execute
+ _ = copyFieldUsingPointers[string](source, target, "runner") // Function name of tRunner running the test.
+ _ = copyFieldUsingPointers[bool](source, target, "isParallel") // Whether the test is parallel.
+
+ _ = copyFieldUsingPointers[int](source, target, "level") // Nesting depth of test or benchmark.
+ _ = copyFieldUsingPointers[[]uintptr](source, target, "creator") // If level > 0, the stack trace at the point where the parent called t.Run.
+ _ = copyFieldUsingPointers[string](source, target, "name") // Name of test or benchmark.
+ _ = copyFieldUsingPointers[unsafe.Pointer](source, target, "start") // Time test or benchmark started
+ _ = copyFieldUsingPointers[time.Duration](source, target, "duration")
+ _ = copyFieldUsingPointers[[]*testing.T](source, target, "sub") // Queue of subtests to be run in parallel.
+ _ = copyFieldUsingPointers[atomic.Int64](source, target, "lastRaceErrors") // Max value of race.Errors seen during the test or its subtests.
+ _ = copyFieldUsingPointers[atomic.Bool](source, target, "raceErrorLogged")
+ _ = copyFieldUsingPointers[string](source, target, "tempDir")
+ _ = copyFieldUsingPointers[error](source, target, "tempDirErr")
+ _ = copyFieldUsingPointers[int32](source, target, "tempDirSeq")
+
+ _ = copyFieldUsingPointers[bool](source, target, "isEnvSet")
+ _ = copyFieldUsingPointers[unsafe.Pointer](source, target, "context") // For running tests and subtests.
+}
+
+// ****************
// BENCHMARKS
+// ****************
// get the pointer to the internal benchmark array
// getInternalBenchmarkArray gets the pointer to the testing.InternalBenchmark array inside
@@ -48,22 +250,6 @@ func getInternalBenchmarkArray(m *testing.M) *[]testing.InternalBenchmark {
return nil
}
-// commonPrivateFields is collection of required private fields from testing.common
-type commonPrivateFields struct {
- mu *sync.RWMutex
- level *int
- name *string // Name of test or benchmark.
-}
-
-// AddLevel increase or decrease the testing.common.level field value, used by
-// testing.B to create the name of the benchmark test
-func (c *commonPrivateFields) AddLevel(delta int) int {
- c.mu.Lock()
- defer c.mu.Unlock()
- *c.level = *c.level + delta
- return *c.level
-}
-
// benchmarkPrivateFields is a collection of required private fields from testing.B
// also contains a pointer to the original testing.B for easy access
type benchmarkPrivateFields struct {
@@ -80,7 +266,7 @@ func getBenchmarkPrivateFields(b *testing.B) *benchmarkPrivateFields {
B: b,
}
- // common
+ // testing.common
if ptr, err := getFieldPointerFrom(b, "mu"); err == nil {
benchFields.mu = (*sync.RWMutex)(ptr)
}
@@ -90,8 +276,17 @@ func getBenchmarkPrivateFields(b *testing.B) *benchmarkPrivateFields {
if ptr, err := getFieldPointerFrom(b, "name"); err == nil {
benchFields.name = (*string)(ptr)
}
+ if ptr, err := getFieldPointerFrom(b, "failed"); err == nil {
+ benchFields.failed = (*bool)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(b, "skipped"); err == nil {
+ benchFields.skipped = (*bool)(ptr)
+ }
+ if ptr, err := getFieldPointerFrom(b, "parent"); err == nil {
+ benchFields.parent = (*unsafe.Pointer)(ptr)
+ }
- // benchmark
+ // testing.B
if ptr, err := getFieldPointerFrom(b, "benchFunc"); err == nil {
benchFields.benchFunc = (*func(b *testing.B))(ptr)
}
diff --git a/internal/civisibility/integrations/gotesting/testcontroller_test.go b/internal/civisibility/integrations/gotesting/testcontroller_test.go
new file mode 100644
index 0000000000..1c6ed68baf
--- /dev/null
+++ b/internal/civisibility/integrations/gotesting/testcontroller_test.go
@@ -0,0 +1,485 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package gotesting
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "os/exec"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils/net"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+var currentM *testing.M
+var mTracer mocktracer.Tracer
+
+// TestMain is the entry point for testing and runs before any test.
+func TestMain(m *testing.M) {
+ log.SetLevel(log.LevelDebug)
+
+ // We need to spawn separated test process for each scenario
+ scenarios := []string{"TestFlakyTestRetries", "TestEarlyFlakeDetection", "TestFlakyTestRetriesAndEarlyFlakeDetection"}
+
+ if internal.BoolEnv(scenarios[0], false) {
+ fmt.Printf("Scenario %s started.\n", scenarios[0])
+ runFlakyTestRetriesTests(m)
+ } else if internal.BoolEnv(scenarios[1], false) {
+ fmt.Printf("Scenario %s started.\n", scenarios[1])
+ runEarlyFlakyTestDetectionTests(m)
+ } else if internal.BoolEnv(scenarios[2], false) {
+ fmt.Printf("Scenario %s started.\n", scenarios[2])
+ runFlakyTestRetriesWithEarlyFlakyTestDetectionTests(m)
+ } else {
+ fmt.Println("Starting tests...")
+ for _, v := range scenarios {
+ cmd := exec.Command(os.Args[0], os.Args[1:]...)
+ cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+ cmd.Env = append(cmd.Env, os.Environ()...)
+ cmd.Env = append(cmd.Env, fmt.Sprintf("%s=true", v))
+ fmt.Printf("Running scenario: %s:\n", v)
+ err := cmd.Run()
+ fmt.Printf("Done.\n\n")
+ if err != nil {
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ fmt.Printf("Scenario %s failed with exit code: %d\n", v, exiterr.ExitCode())
+ os.Exit(exiterr.ExitCode())
+ } else {
+ fmt.Printf("cmd.Run: %v\n", err)
+ os.Exit(1)
+ }
+ break
+ }
+ }
+ }
+
+ os.Exit(0)
+}
+
+func runFlakyTestRetriesTests(m *testing.M) {
+ // mock the settings api to enable automatic test retries
+ server := setUpHttpServer(true, false, nil)
+ defer server.Close()
+
+ // set a custom retry count
+ os.Setenv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, "10")
+
+ // initialize the mock tracer for doing assertions on the finished spans
+ currentM = m
+ mTracer = integrations.InitializeCIVisibilityMock()
+
+ // execute the tests, we are expecting some tests to fail and check the assertion later
+ exitCode := RunM(m)
+ if exitCode != 1 {
+ panic("expected the exit code to be 1. We have a failing test on purpose.")
+ }
+
+ // get all finished spans
+ finishedSpans := mTracer.FinishedSpans()
+
+ // 1 session span
+ // 1 module span
+ // 2 suite span (testing_test.go and reflections_test.go)
+ // 5 tests from reflections_test.go
+ // 1 TestMyTest01
+ // 1 TestMyTest02 + 2 subtests
+ // 1 Test_Foo + 3 subtests
+ // 1 TestSkip
+ // 1 TestRetryWithPanic + 3 retry tests from testing_test.go
+ // 1 TestRetryWithFail + 3 retry tests from testing_test.go
+ // 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go
+ // 1 TestNormalPassingAfterRetryAlwaysFail
+ // 1 TestEarlyFlakeDetection
+
+ // check spans by resource name
+ checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1)
+ checkSpansByResourceName(finishedSpans, "reflections_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithPanic", 4)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 4)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 1)
+
+ // check spans by tag
+ checkSpansByTagName(finishedSpans, constants.TestIsRetry, 16)
+
+ // check spans by type
+ checkSpansByType(finishedSpans,
+ 39,
+ 1,
+ 1,
+ 2,
+ 35,
+ 0)
+
+ os.Exit(0)
+}
+
+func runEarlyFlakyTestDetectionTests(m *testing.M) {
+ // mock the settings api to enable automatic test retries
+ server := setUpHttpServer(false, true, &net.EfdResponseData{
+ Tests: net.EfdResponseDataModules{
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting": net.EfdResponseDataSuites{
+ "reflections_test.go": []string{
+ "TestGetFieldPointerFrom",
+ "TestGetInternalTestArray",
+ "TestGetInternalBenchmarkArray",
+ "TestCommonPrivateFields_AddLevel",
+ "TestGetBenchmarkPrivateFields",
+ },
+ },
+ },
+ })
+ defer server.Close()
+
+ // initialize the mock tracer for doing assertions on the finished spans
+ currentM = m
+ mTracer = integrations.InitializeCIVisibilityMock()
+
+ // execute the tests, we are expecting some tests to fail and check the assertion later
+ exitCode := RunM(m)
+ if exitCode != 1 {
+ panic("expected the exit code to be 1. We have a failing test on purpose.")
+ }
+
+ // get all finished spans
+ finishedSpans := mTracer.FinishedSpans()
+
+ // 1 session span
+ // 1 module span
+ // 2 suite span (testing_test.go and reflections_test.go)
+ // 5 tests from reflections_test.go
+ // 11 TestMyTest01
+ // 11 TestMyTest02 + 22 subtests
+ // 11 Test_Foo + 33 subtests
+ // 11 TestSkip
+ // 11 TestRetryWithPanic
+ // 11 TestRetryWithFail
+ // 11 TestRetryAlwaysFail
+ // 11 TestNormalPassingAfterRetryAlwaysFail
+ // 11 TestEarlyFlakeDetection
+ // 22 normal spans from testing_test.go
+
+ // check spans by resource name
+ checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1)
+ checkSpansByResourceName(finishedSpans, "reflections_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithPanic", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 11)
+
+ // check spans by tag
+ checkSpansByTagName(finishedSpans, constants.TestIsNew, 154)
+ checkSpansByTagName(finishedSpans, constants.TestIsRetry, 140)
+
+ // check spans by type
+ checkSpansByType(finishedSpans,
+ 163,
+ 1,
+ 1,
+ 2,
+ 159,
+ 0)
+
+ os.Exit(0)
+}
+
+func runFlakyTestRetriesWithEarlyFlakyTestDetectionTests(m *testing.M) {
+ // mock the settings api to enable automatic test retries
+ server := setUpHttpServer(true, true, &net.EfdResponseData{
+ Tests: net.EfdResponseDataModules{
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting": net.EfdResponseDataSuites{
+ "reflections_test.go": []string{
+ "TestGetFieldPointerFrom",
+ "TestGetInternalTestArray",
+ "TestGetInternalBenchmarkArray",
+ "TestCommonPrivateFields_AddLevel",
+ "TestGetBenchmarkPrivateFields",
+ },
+ "testing_test.go": []string{
+ "TestMyTest01",
+ "TestMyTest02",
+ "Test_Foo",
+ "TestWithExternalCalls",
+ "TestSkip",
+ "TestRetryWithPanic",
+ "TestRetryWithFail",
+ "TestRetryAlwaysFail",
+ "TestNormalPassingAfterRetryAlwaysFail",
+ },
+ },
+ },
+ })
+ defer server.Close()
+
+ // set a custom retry count
+ os.Setenv(constants.CIVisibilityFlakyRetryCountEnvironmentVariable, "10")
+
+ // initialize the mock tracer for doing assertions on the finished spans
+ currentM = m
+ mTracer = integrations.InitializeCIVisibilityMock()
+
+ // execute the tests, we are expecting some tests to fail and check the assertion later
+ exitCode := RunM(m)
+ if exitCode != 1 {
+ panic("expected the exit code to be 1. We have a failing test on purpose.")
+ }
+
+ // get all finished spans
+ finishedSpans := mTracer.FinishedSpans()
+
+ // 1 session span
+ // 1 module span
+ // 2 suite span (testing_test.go and reflections_test.go)
+ // 5 tests from reflections_test.go
+ // 1 TestMyTest01
+ // 1 TestMyTest02 + 2 subtests
+ // 1 Test_Foo + 3 subtests
+ // 1 TestWithExternalCalls + 2 subtests
+ // 1 TestSkip
+ // 1 TestRetryWithPanic + 3 retry tests from testing_test.go
+ // 1 TestRetryWithFail + 3 retry tests from testing_test.go
+ // 1 TestRetryAlwaysFail + 10 retry tests from testing_test.go
+ // 1 TestNormalPassingAfterRetryAlwaysFail
+ // 11 TestEarlyFlakeDetection + 10 retries
+ // 2 normal spans from testing_test.go
+
+ // check spans by resource name
+ checkSpansByResourceName(finishedSpans, "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations/gotesting", 1)
+ checkSpansByResourceName(finishedSpans, "reflections_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest01", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestMyTest02/sub01/sub03", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/yellow_should_return_color", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/banana_should_return_fruit", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.Test_Foo/duck_should_return_animal", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestSkip", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithPanic", 4)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryWithFail", 4)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestRetryAlwaysFail", 11)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestNormalPassingAfterRetryAlwaysFail", 1)
+ checkSpansByResourceName(finishedSpans, "testing_test.go.TestEarlyFlakeDetection", 21)
+
+ // check spans by tag
+ checkSpansByTagName(finishedSpans, constants.TestIsNew, 21)
+ checkSpansByTagName(finishedSpans, constants.TestIsRetry, 36)
+
+ // check spans by type
+ checkSpansByType(finishedSpans,
+ 59,
+ 1,
+ 1,
+ 2,
+ 55,
+ 0)
+
+ os.Exit(0)
+}
+
+func checkSpansByType(finishedSpans []mocktracer.Span,
+ totalFinishedSpansCount int, sessionSpansCount int, moduleSpansCount int,
+ suiteSpansCount int, testSpansCount int, normalSpansCount int) {
+ calculatedFinishedSpans := len(finishedSpans)
+ fmt.Printf("Number of spans received: %d\n", calculatedFinishedSpans)
+ if calculatedFinishedSpans < totalFinishedSpansCount {
+ panic(fmt.Sprintf("expected at least %d finished spans, got %d", totalFinishedSpansCount, calculatedFinishedSpans))
+ }
+
+ sessionSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSession)
+ calculatedSessionSpans := len(sessionSpans)
+ fmt.Printf("Number of sessions received: %d\n", calculatedSessionSpans)
+ showResourcesNameFromSpans(sessionSpans)
+ if calculatedSessionSpans != sessionSpansCount {
+ panic(fmt.Sprintf("expected exactly %d session span, got %d", sessionSpansCount, calculatedSessionSpans))
+ }
+
+ moduleSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestModule)
+ calculatedModuleSpans := len(moduleSpans)
+ fmt.Printf("Number of modules received: %d\n", calculatedModuleSpans)
+ showResourcesNameFromSpans(moduleSpans)
+ if calculatedModuleSpans != moduleSpansCount {
+ panic(fmt.Sprintf("expected exactly %d module span, got %d", moduleSpansCount, calculatedModuleSpans))
+ }
+
+ suiteSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSuite)
+ calculatedSuiteSpans := len(suiteSpans)
+ fmt.Printf("Number of suites received: %d\n", calculatedSuiteSpans)
+ showResourcesNameFromSpans(suiteSpans)
+ if calculatedSuiteSpans != suiteSpansCount {
+ panic(fmt.Sprintf("expected exactly %d suite spans, got %d", suiteSpansCount, calculatedSuiteSpans))
+ }
+
+ testSpans := getSpansWithType(finishedSpans, constants.SpanTypeTest)
+ calculatedTestSpans := len(testSpans)
+ fmt.Printf("Number of tests received: %d\n", calculatedTestSpans)
+ showResourcesNameFromSpans(testSpans)
+ if calculatedTestSpans != testSpansCount {
+ panic(fmt.Sprintf("expected exactly %d test spans, got %d", testSpansCount, calculatedTestSpans))
+ }
+
+ normalSpans := getSpansWithType(finishedSpans, ext.SpanTypeHTTP)
+ calculatedNormalSpans := len(normalSpans)
+ fmt.Printf("Number of http spans received: %d\n", calculatedNormalSpans)
+ showResourcesNameFromSpans(normalSpans)
+ if calculatedNormalSpans != normalSpansCount {
+ panic(fmt.Sprintf("expected exactly %d normal spans, got %d", normalSpansCount, calculatedNormalSpans))
+ }
+}
+
+func checkSpansByResourceName(finishedSpans []mocktracer.Span, resourceName string, count int) []mocktracer.Span {
+ spans := getSpansWithResourceName(finishedSpans, resourceName)
+ numOfSpans := len(spans)
+ if numOfSpans != count {
+ panic(fmt.Sprintf("expected exactly %d spans with resource name: %s, got %d", count, resourceName, numOfSpans))
+ }
+
+ return spans
+}
+
+func checkSpansByTagName(finishedSpans []mocktracer.Span, tagName string, count int) []mocktracer.Span {
+ spans := getSpansWithTagName(finishedSpans, tagName)
+ numOfSpans := len(spans)
+ if numOfSpans != count {
+ panic(fmt.Sprintf("expected exactly %d spans with tag name: %s, got %d", count, tagName, numOfSpans))
+ }
+
+ return spans
+}
+
+func setUpHttpServer(flakyRetriesEnabled bool, earlyFlakyDetectionEnabled bool, earlyFlakyDetectionData *net.EfdResponseData) *httptest.Server {
+ // mock the settings api to enable automatic test retries
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Printf("MockApi received request: %s\n", r.URL.Path)
+
+ // Settings request
+ if r.URL.Path == "/api/v2/libraries/tests/services/setting" {
+ w.Header().Set("Content-Type", "application/json")
+ response := struct {
+ Data struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Attributes net.SettingsResponseData `json:"attributes"`
+ } `json:"data,omitempty"`
+ }{}
+
+ // let's enable flaky test retries
+ response.Data.Attributes = net.SettingsResponseData{
+ FlakyTestRetriesEnabled: flakyRetriesEnabled,
+ }
+ response.Data.Attributes.EarlyFlakeDetection.Enabled = earlyFlakyDetectionEnabled
+ response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveS = 10
+ response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.TenS = 5
+ response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.ThirtyS = 3
+ response.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveM = 2
+
+ fmt.Printf("MockApi sending response: %v\n", response)
+ json.NewEncoder(w).Encode(&response)
+ } else if earlyFlakyDetectionEnabled && r.URL.Path == "/api/v2/ci/libraries/tests" {
+ w.Header().Set("Content-Type", "application/json")
+ response := struct {
+ Data struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Attributes net.EfdResponseData `json:"attributes"`
+ } `json:"data,omitempty"`
+ }{}
+
+ if earlyFlakyDetectionData != nil {
+ response.Data.Attributes = *earlyFlakyDetectionData
+ }
+
+ fmt.Printf("MockApi sending response: %v\n", response)
+ json.NewEncoder(w).Encode(&response)
+ } else if r.URL.Path == "/api/v2/git/repository/search_commits" {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte("{}"))
+ } else if r.URL.Path == "/api/v2/git/repository/packfile" {
+ w.WriteHeader(http.StatusAccepted)
+ } else {
+ http.NotFound(w, r)
+ }
+ }))
+
+ // set the custom agentless url and the flaky retry count env-var
+ fmt.Printf("Using mockapi at: %s\n", server.URL)
+ os.Setenv(constants.CIVisibilityAgentlessEnabledEnvironmentVariable, "1")
+ os.Setenv(constants.CIVisibilityAgentlessURLEnvironmentVariable, server.URL)
+ os.Setenv(constants.APIKeyEnvironmentVariable, "12345")
+
+ return server
+}
+
+func getSpansWithType(spans []mocktracer.Span, spanType string) []mocktracer.Span {
+ var result []mocktracer.Span
+ for _, span := range spans {
+ if span.Tag(ext.SpanType) == spanType {
+ result = append(result, span)
+ }
+ }
+
+ return result
+}
+
+func getSpansWithResourceName(spans []mocktracer.Span, resourceName string) []mocktracer.Span {
+ var result []mocktracer.Span
+ for _, span := range spans {
+ if span.Tag(ext.ResourceName) == resourceName {
+ result = append(result, span)
+ }
+ }
+
+ return result
+}
+
+func getSpansWithTagName(spans []mocktracer.Span, tag string) []mocktracer.Span {
+ var result []mocktracer.Span
+ for _, span := range spans {
+ if span.Tag(tag) != nil {
+ result = append(result, span)
+ }
+ }
+
+ return result
+}
+
+func showResourcesNameFromSpans(spans []mocktracer.Span) {
+ for i, span := range spans {
+ fmt.Printf(" [%d] = %v\n", i, span.Tag(ext.ResourceName))
+ }
+}
diff --git a/internal/civisibility/integrations/gotesting/testing.go b/internal/civisibility/integrations/gotesting/testing.go
index f1c35c726a..4fa6443647 100644
--- a/internal/civisibility/integrations/gotesting/testing.go
+++ b/internal/civisibility/integrations/gotesting/testing.go
@@ -7,15 +7,14 @@ package gotesting
import (
"fmt"
- "os"
"reflect"
"runtime"
- "strings"
"sync/atomic"
"testing"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
)
@@ -68,31 +67,16 @@ type (
// Run initializes CI Visibility, instruments tests and benchmarks, and runs them.
func (ddm *M) Run() int {
- integrations.EnsureCiVisibilityInitialization()
- defer integrations.ExitCiVisibility()
-
- // Create a new test session for CI visibility.
- session = integrations.CreateTestSession()
-
m := (*testing.M)(ddm)
- // Instrument the internal tests for CI visibility.
- ddm.instrumentInternalTests(getInternalTestArray(m))
-
- // Instrument the internal benchmarks for CI visibility.
- for _, v := range os.Args {
- // check if benchmarking is enabled to instrument
- if strings.Contains(v, "-bench") || strings.Contains(v, "test.bench") {
- ddm.instrumentInternalBenchmarks(getInternalBenchmarkArray(m))
- break
- }
- }
+ // Instrument testing.M
+ exitFn := instrumentTestingM(m)
// Run the tests and benchmarks.
var exitCode = m.Run()
- // Close the session and return the exit code.
- session.Close(exitCode)
+ // Finalize instrumentation
+ exitFn(exitCode)
return exitCode
}
@@ -145,23 +129,60 @@ func (ddm *M) instrumentInternalTests(internalTests *[]testing.InternalTest) {
// executeInternalTest wraps the original test function to include CI visibility instrumentation.
func (ddm *M) executeInternalTest(testInfo *testingTInfo) func(*testing.T) {
originalFunc := runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(testInfo.originalFunc)).Pointer())
- return func(t *testing.T) {
+ instrumentedFunc := func(t *testing.T) {
+ // Get the metadata regarding the execution (in case is already created from the additional features)
+ execMeta := getTestMetadata(t)
+ if execMeta == nil {
+ // in case there's no additional features then we create the metadata for this execution and defer the disposal
+ execMeta = createTestMetadata(t)
+ defer deleteTestMetadata(t)
+ }
+
// Create or retrieve the module, suite, and test for CI visibility.
module := session.GetOrCreateModuleWithFramework(testInfo.moduleName, testFramework, runtime.Version())
suite := module.GetOrCreateSuite(testInfo.suiteName)
test := suite.CreateTest(testInfo.testName)
test.SetTestFunc(originalFunc)
- setCiVisibilityTest(t, test)
+
+ // Set the CI Visibility test to the execution metadata
+ execMeta.test = test
+
+ // If the execution is for a new test we tag the test event from early flake detection
+ if execMeta.isANewTest {
+ // Set the is new test tag
+ test.SetTag(constants.TestIsNew, "true")
+ }
+
+ // If the execution is a retry we tag the test event
+ if execMeta.isARetry {
+ // Set the retry tag
+ test.SetTag(constants.TestIsRetry, "true")
+ }
+
+ startTime := time.Now()
defer func() {
+ duration := time.Since(startTime)
+ // check if is a new EFD test and the duration >= 5 min
+ if execMeta.isANewTest && duration.Minutes() >= 5 {
+ // Set the EFD retry abort reason
+ test.SetTag(constants.TestEarlyFlakeDetectionRetryAborted, "slow")
+ }
+
if r := recover(); r != nil {
// Handle panic and set error information.
- test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1))
+ execMeta.panicData = r
+ execMeta.panicStacktrace = utils.GetStacktrace(1)
+ test.SetErrorInfo("panic", fmt.Sprint(r), execMeta.panicStacktrace)
suite.SetTag(ext.Error, true)
module.SetTag(ext.Error, true)
test.Close(integrations.ResultStatusFail)
- checkModuleAndSuite(module, suite)
- integrations.ExitCiVisibility()
- panic(r)
+ if !execMeta.hasAdditionalFeatureWrapper {
+ // we are going to let the additional feature wrapper to handle
+ // the panic, and module and suite closing (we don't want to close the suite earlier in case of a retry)
+ checkModuleAndSuite(module, suite)
+ integrations.ExitCiVisibility()
+ panic(r)
+ }
} else {
// Normal finalization: determine the test result based on its state.
if t.Failed() {
@@ -175,13 +196,23 @@ func (ddm *M) executeInternalTest(testInfo *testingTInfo) func(*testing.T) {
test.Close(integrations.ResultStatusPass)
}
- checkModuleAndSuite(module, suite)
+ if !execMeta.hasAdditionalFeatureWrapper {
+ // we are going to let the additional feature wrapper to handle
+ // the module and suite closing (we don't want to close the suite earlier in case of a retry)
+ checkModuleAndSuite(module, suite)
+ }
}
}()
// Execute the original test function.
testInfo.originalFunc(t)
}
+
+ // Register the instrumented func as an internal instrumented func (to avoid double instrumentation)
+ setInstrumentationMetadata(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer()), &instrumentationMetadata{IsInternal: true})
+
+ // Get the additional feature wrapper
+ return applyAdditionalFeaturesToTestFunc(instrumentedFunc, &testInfo.commonInfo)
}
// instrumentInternalBenchmarks instruments the internal benchmarks for CI visibility.
@@ -233,13 +264,13 @@ func (ddm *M) instrumentInternalBenchmarks(internalBenchmarks *[]testing.Interna
// executeInternalBenchmark wraps the original benchmark function to include CI visibility instrumentation.
func (ddm *M) executeInternalBenchmark(benchmarkInfo *testingBInfo) func(*testing.B) {
- return func(b *testing.B) {
+ originalFunc := runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(benchmarkInfo.originalFunc)).Pointer())
+ instrumentedInternalFunc := func(b *testing.B) {
// decrement level
getBenchmarkPrivateFields(b).AddLevel(-1)
startTime := time.Now()
- originalFunc := runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(benchmarkInfo.originalFunc)).Pointer())
module := session.GetOrCreateModuleWithFrameworkAndStartTime(benchmarkInfo.moduleName, testFramework, runtime.Version(), startTime)
suite := module.GetOrCreateSuiteWithStartTime(benchmarkInfo.suiteName, startTime)
test := suite.CreateTestWithStartTime(benchmarkInfo.testName, startTime)
@@ -248,7 +279,7 @@ func (ddm *M) executeInternalBenchmark(benchmarkInfo *testingBInfo) func(*testin
// Run the original benchmark function.
var iPfOfB *benchmarkPrivateFields
var recoverFunc *func(r any)
- b.Run(b.Name(), func(b *testing.B) {
+ instrumentedFunc := func(b *testing.B) {
// Stop the timer to perform initialization and replacements.
b.StopTimer()
@@ -269,14 +300,26 @@ func (ddm *M) executeInternalBenchmark(benchmarkInfo *testingBInfo) func(*testin
iPfOfB = getBenchmarkPrivateFields(b)
// Replace the benchmark function with the original one (this must be executed only once - the first iteration[b.run1]).
*iPfOfB.benchFunc = benchmarkInfo.originalFunc
- // Set the CI visibility benchmark.
- setCiVisibilityBenchmark(b, test)
+
+ // Get the metadata regarding the execution (in case is already created from the additional features)
+ execMeta := getTestMetadata(b)
+ if execMeta == nil {
+ // in case there's no additional features then we create the metadata for this execution and defer the disposal
+ execMeta = createTestMetadata(b)
+ defer deleteTestMetadata(b)
+ }
+
+ // Sets the CI Visibility test
+ execMeta.test = test
// Restart the timer and execute the original benchmark function.
b.ResetTimer()
b.StartTimer()
benchmarkInfo.originalFunc(b)
- })
+ }
+
+ setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedFunc)).Pointer()))
+ b.Run(b.Name(), instrumentedFunc)
endTime := time.Now()
results := iPfOfB.result
@@ -332,6 +375,9 @@ func (ddm *M) executeInternalBenchmark(benchmarkInfo *testingBInfo) func(*testin
checkModuleAndSuite(module, suite)
}
+ setCiVisibilityBenchmarkFunc(originalFunc)
+ setCiVisibilityBenchmarkFunc(runtime.FuncForPC(reflect.Indirect(reflect.ValueOf(instrumentedInternalFunc)).Pointer()))
+ return instrumentedInternalFunc
}
// RunM runs the tests and benchmarks using CI visibility.
diff --git a/internal/civisibility/integrations/gotesting/testingB.go b/internal/civisibility/integrations/gotesting/testingB.go
index 0ebd7c8159..5678daf6f6 100644
--- a/internal/civisibility/integrations/gotesting/testingB.go
+++ b/internal/civisibility/integrations/gotesting/testingB.go
@@ -8,31 +8,27 @@ package gotesting
import (
"context"
"fmt"
- "reflect"
"regexp"
"runtime"
"sync"
- "sync/atomic"
"testing"
"time"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
)
var (
- // ciVisibilityBenchmarks holds a map of *testing.B to civisibility.DdTest for tracking benchmarks.
- ciVisibilityBenchmarks = map[*testing.B]integrations.DdTest{}
-
- // ciVisibilityBenchmarksMutex is a read-write mutex for synchronizing access to ciVisibilityBenchmarks.
- ciVisibilityBenchmarksMutex sync.RWMutex
-
// subBenchmarkAutoName is a placeholder name for CI Visibility sub-benchmarks.
subBenchmarkAutoName = "[DD:TestVisibility]"
// subBenchmarkAutoNameRegex is a regex pattern to match the sub-benchmark auto name.
subBenchmarkAutoNameRegex = regexp.MustCompile(`(?si)\/\[DD:TestVisibility\].*`)
+
+ // civisibilityBenchmarksFuncs holds a map of *runtime.Func for tracking instrumented functions
+ civisibilityBenchmarksFuncs = map[*runtime.Func]struct{}{}
+
+ // civisibilityBenchmarksFuncsMutex is a read-write mutex for synchronizing access to civisibilityBenchmarksFuncs.
+ civisibilityBenchmarksFuncsMutex sync.RWMutex
)
// B is a type alias for testing.B to provide additional methods for CI visibility.
@@ -48,127 +44,9 @@ func GetBenchmark(t *testing.B) *B { return (*B)(t) }
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
func (ddb *B) Run(name string, f func(*testing.B)) bool {
- // Reflect the function to obtain its pointer.
- fReflect := reflect.Indirect(reflect.ValueOf(f))
- moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer())
- originalFunc := runtime.FuncForPC(fReflect.Pointer())
-
- // Increment the test count in the module.
- atomic.AddInt32(modulesCounters[moduleName], 1)
-
- // Increment the test count in the suite.
- atomic.AddInt32(suitesCounters[suiteName], 1)
-
pb := (*testing.B)(ddb)
- return pb.Run(subBenchmarkAutoName, func(b *testing.B) {
- // The sub-benchmark implementation relies on creating a dummy sub benchmark (called [DD:TestVisibility]) with
- // a Run over the original sub benchmark function to get the child results without interfering measurements
- // By doing this the name of the sub-benchmark are changed
- // from:
- // benchmark/child
- // to:
- // benchmark/[DD:TestVisibility]/child
- // We use regex and decrement the depth level of the benchmark to restore the original name
-
- // Decrement level.
- bpf := getBenchmarkPrivateFields(b)
- bpf.AddLevel(-1)
-
- startTime := time.Now()
- module := session.GetOrCreateModuleWithFrameworkAndStartTime(moduleName, testFramework, runtime.Version(), startTime)
- suite := module.GetOrCreateSuiteWithStartTime(suiteName, startTime)
- test := suite.CreateTestWithStartTime(fmt.Sprintf("%s/%s", pb.Name(), name), startTime)
- test.SetTestFunc(originalFunc)
-
- // Restore the original name without the sub-benchmark auto name.
- *bpf.name = subBenchmarkAutoNameRegex.ReplaceAllString(*bpf.name, "")
-
- // Run original benchmark.
- var iPfOfB *benchmarkPrivateFields
- var recoverFunc *func(r any)
- b.Run(name, func(b *testing.B) {
- // Stop the timer to do the initialization and replacements.
- b.StopTimer()
-
- defer func() {
- if r := recover(); r != nil {
- if recoverFunc != nil {
- fn := *recoverFunc
- fn(r)
- }
- panic(r)
- }
- }()
-
- // First time we get the private fields of the inner testing.B.
- iPfOfB = getBenchmarkPrivateFields(b)
- // Replace this function with the original one (executed only once - the first iteration[b.run1]).
- *iPfOfB.benchFunc = f
- // Set b to the CI visibility test.
- setCiVisibilityBenchmark(b, test)
-
- // Enable the timer again.
- b.ResetTimer()
- b.StartTimer()
-
- // Execute original func
- f(b)
- })
-
- endTime := time.Now()
- results := iPfOfB.result
-
- // Set benchmark data for CI visibility.
- test.SetBenchmarkData("duration", map[string]any{
- "run": results.N,
- "mean": results.NsPerOp(),
- })
- test.SetBenchmarkData("memory_total_operations", map[string]any{
- "run": results.N,
- "mean": results.AllocsPerOp(),
- "statistics.max": results.MemAllocs,
- })
- test.SetBenchmarkData("mean_heap_allocations", map[string]any{
- "run": results.N,
- "mean": results.AllocedBytesPerOp(),
- })
- test.SetBenchmarkData("total_heap_allocations", map[string]any{
- "run": results.N,
- "mean": iPfOfB.result.MemBytes,
- })
- if len(results.Extra) > 0 {
- mapConverted := map[string]any{}
- for k, v := range results.Extra {
- mapConverted[k] = v
- }
- test.SetBenchmarkData("extra", mapConverted)
- }
-
- // Define a function to handle panic during benchmark finalization.
- panicFunc := func(r any) {
- test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1))
- suite.SetTag(ext.Error, true)
- module.SetTag(ext.Error, true)
- test.Close(integrations.ResultStatusFail)
- checkModuleAndSuite(module, suite)
- integrations.ExitCiVisibility()
- }
- recoverFunc = &panicFunc
-
- // Normal finalization: determine the benchmark result based on its state.
- if iPfOfB.B.Failed() {
- test.SetTag(ext.Error, true)
- suite.SetTag(ext.Error, true)
- module.SetTag(ext.Error, true)
- test.CloseWithFinishTime(integrations.ResultStatusFail, endTime)
- } else if iPfOfB.B.Skipped() {
- test.CloseWithFinishTime(integrations.ResultStatusSkip, endTime)
- } else {
- test.CloseWithFinishTime(integrations.ResultStatusPass, endTime)
- }
-
- checkModuleAndSuite(module, suite)
- })
+ name, f = instrumentTestingBFunc(pb, name, f)
+ return pb.Run(name, f)
}
// Context returns the CI Visibility context of the Test span.
@@ -176,9 +54,9 @@ func (ddb *B) Run(name string, f func(*testing.B)) bool {
// integration tests.
func (ddb *B) Context() context.Context {
b := (*testing.B)(ddb)
- ciTest := getCiVisibilityBenchmark(b)
- if ciTest != nil {
- return ciTest.Context()
+ ciTestItem := getTestMetadata(b)
+ if ciTestItem != nil && ciTestItem.test != nil {
+ return ciTestItem.test.Context()
}
return context.Background()
@@ -230,11 +108,7 @@ func (ddb *B) Skipf(format string, args ...any) {
// during the test. Calling SkipNow does not stop those other goroutines.
func (ddb *B) SkipNow() {
b := (*testing.B)(ddb)
- ciTest := getCiVisibilityBenchmark(b)
- if ciTest != nil {
- ciTest.Close(integrations.ResultStatusSkip)
- }
-
+ instrumentSkipNow(b)
b.SkipNow()
}
@@ -300,37 +174,31 @@ func (ddb *B) SetParallelism(p int) { (*testing.B)(ddb).SetParallelism(p) }
func (ddb *B) getBWithError(errType string, errMessage string) *testing.B {
b := (*testing.B)(ddb)
- ciTest := getCiVisibilityBenchmark(b)
- if ciTest != nil {
- ciTest.SetErrorInfo(errType, errMessage, utils.GetStacktrace(2))
- }
+ instrumentSetErrorInfo(b, errType, errMessage, 1)
return b
}
func (ddb *B) getBWithSkip(skipReason string) *testing.B {
b := (*testing.B)(ddb)
- ciTest := getCiVisibilityBenchmark(b)
- if ciTest != nil {
- ciTest.CloseWithFinishTimeAndSkipReason(integrations.ResultStatusSkip, time.Now(), skipReason)
- }
+ instrumentCloseAndSkip(b, skipReason)
return b
}
-// getCiVisibilityBenchmark retrieves the CI visibility benchmark associated with a given *testing.B.
-func getCiVisibilityBenchmark(b *testing.B) integrations.DdTest {
- ciVisibilityBenchmarksMutex.RLock()
- defer ciVisibilityBenchmarksMutex.RUnlock()
+// hasCiVisibilityBenchmarkFunc reports whether a *runtime.Func has been registered as instrumented.
+func hasCiVisibilityBenchmarkFunc(fn *runtime.Func) bool {
+ civisibilityBenchmarksFuncsMutex.RLock()
+ defer civisibilityBenchmarksFuncsMutex.RUnlock()
- if v, ok := ciVisibilityBenchmarks[b]; ok {
- return v
+ if _, ok := civisibilityBenchmarksFuncs[fn]; ok {
+ return true
}
- return nil
+ return false
}
-// setCiVisibilityBenchmark associates a CI visibility benchmark with a given *testing.B.
-func setCiVisibilityBenchmark(b *testing.B, ciTest integrations.DdTest) {
- ciVisibilityBenchmarksMutex.Lock()
- defer ciVisibilityBenchmarksMutex.Unlock()
- ciVisibilityBenchmarks[b] = ciTest
+// setCiVisibilityBenchmarkFunc tracks a *runtime.Func as instrumented benchmark.
+func setCiVisibilityBenchmarkFunc(fn *runtime.Func) {
+ // Writers must take the exclusive lock: RLock here would race with
+ // concurrent map writes (readers use RLock in hasCiVisibilityBenchmarkFunc).
+ civisibilityBenchmarksFuncsMutex.Lock()
+ defer civisibilityBenchmarksFuncsMutex.Unlock()
+ civisibilityBenchmarksFuncs[fn] = struct{}{}
}
diff --git a/internal/civisibility/integrations/gotesting/testingT.go b/internal/civisibility/integrations/gotesting/testingT.go
index 2ac53c3fa8..74c2d5cee0 100644
--- a/internal/civisibility/integrations/gotesting/testingT.go
+++ b/internal/civisibility/integrations/gotesting/testingT.go
@@ -8,24 +8,10 @@ package gotesting
import (
"context"
"fmt"
- "reflect"
- "runtime"
- "sync"
- "sync/atomic"
"testing"
"time"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
-)
-
-var (
- // ciVisibilityTests holds a map of *testing.T to civisibility.DdTest for tracking tests.
- ciVisibilityTests = map[*testing.T]integrations.DdTest{}
-
- // ciVisibilityTestsMutex is a read-write mutex for synchronizing access to ciVisibilityTests.
- ciVisibilityTestsMutex sync.RWMutex
)
// T is a type alias for testing.T to provide additional methods for CI visibility.
@@ -44,52 +30,9 @@ func GetTest(t *testing.T) *T {
// Run may be called simultaneously from multiple goroutines, but all such calls
// must return before the outer test function for t returns.
func (ddt *T) Run(name string, f func(*testing.T)) bool {
- // Reflect the function to obtain its pointer.
- fReflect := reflect.Indirect(reflect.ValueOf(f))
- moduleName, suiteName := utils.GetModuleAndSuiteName(fReflect.Pointer())
- originalFunc := runtime.FuncForPC(fReflect.Pointer())
-
- // Increment the test count in the module.
- atomic.AddInt32(modulesCounters[moduleName], 1)
-
- // Increment the test count in the suite.
- atomic.AddInt32(suitesCounters[suiteName], 1)
-
+ f = instrumentTestingTFunc(f)
t := (*testing.T)(ddt)
- return t.Run(name, func(t *testing.T) {
- // Create or retrieve the module, suite, and test for CI visibility.
- module := session.GetOrCreateModuleWithFramework(moduleName, testFramework, runtime.Version())
- suite := module.GetOrCreateSuite(suiteName)
- test := suite.CreateTest(t.Name())
- test.SetTestFunc(originalFunc)
- setCiVisibilityTest(t, test)
- defer func() {
- if r := recover(); r != nil {
- // Handle panic and set error information.
- test.SetErrorInfo("panic", fmt.Sprint(r), utils.GetStacktrace(1))
- test.Close(integrations.ResultStatusFail)
- checkModuleAndSuite(module, suite)
- integrations.ExitCiVisibility()
- panic(r)
- } else {
- // Normal finalization: determine the test result based on its state.
- if t.Failed() {
- test.SetTag(ext.Error, true)
- suite.SetTag(ext.Error, true)
- module.SetTag(ext.Error, true)
- test.Close(integrations.ResultStatusFail)
- } else if t.Skipped() {
- test.Close(integrations.ResultStatusSkip)
- } else {
- test.Close(integrations.ResultStatusPass)
- }
- checkModuleAndSuite(module, suite)
- }
- }()
-
- // Execute the original test function.
- f(t)
- })
+ return t.Run(name, f)
}
// Context returns the CI Visibility context of the Test span.
@@ -97,9 +40,9 @@ func (ddt *T) Run(name string, f func(*testing.T)) bool {
// integration tests.
func (ddt *T) Context() context.Context {
t := (*testing.T)(ddt)
- ciTest := getCiVisibilityTest(t)
- if ciTest != nil {
- return ciTest.Context()
+ ciTestItem := getTestMetadata(t)
+ if ciTestItem != nil && ciTestItem.test != nil {
+ return ciTestItem.test.Context()
}
return context.Background()
@@ -151,11 +94,7 @@ func (ddt *T) Skipf(format string, args ...any) {
// during the test. Calling SkipNow does not stop those other goroutines.
func (ddt *T) SkipNow() {
t := (*testing.T)(ddt)
- ciTest := getCiVisibilityTest(t)
- if ciTest != nil {
- ciTest.Close(integrations.ResultStatusSkip)
- }
-
+ instrumentSkipNow(t)
t.SkipNow()
}
@@ -180,37 +119,12 @@ func (ddt *T) Setenv(key, value string) { (*testing.T)(ddt).Setenv(key, value) }
func (ddt *T) getTWithError(errType string, errMessage string) *testing.T {
t := (*testing.T)(ddt)
- ciTest := getCiVisibilityTest(t)
- if ciTest != nil {
- ciTest.SetErrorInfo(errType, errMessage, utils.GetStacktrace(2))
- }
+ instrumentSetErrorInfo(t, errType, errMessage, 1)
return t
}
func (ddt *T) getTWithSkip(skipReason string) *testing.T {
t := (*testing.T)(ddt)
- ciTest := getCiVisibilityTest(t)
- if ciTest != nil {
- ciTest.CloseWithFinishTimeAndSkipReason(integrations.ResultStatusSkip, time.Now(), skipReason)
- }
+ instrumentCloseAndSkip(t, skipReason)
return t
}
-
-// getCiVisibilityTest retrieves the CI visibility test associated with a given *testing.T.
-func getCiVisibilityTest(t *testing.T) integrations.DdTest {
- ciVisibilityTestsMutex.RLock()
- defer ciVisibilityTestsMutex.RUnlock()
-
- if v, ok := ciVisibilityTests[t]; ok {
- return v
- }
-
- return nil
-}
-
-// setCiVisibilityTest associates a CI visibility test with a given *testing.T.
-func setCiVisibilityTest(t *testing.T, ciTest integrations.DdTest) {
- ciVisibilityTestsMutex.Lock()
- defer ciVisibilityTestsMutex.Unlock()
- ciVisibilityTests[t] = ciTest
-}
diff --git a/internal/civisibility/integrations/gotesting/testing_test.go b/internal/civisibility/integrations/gotesting/testing_test.go
index e45e62d15d..1c2e1b2556 100644
--- a/internal/civisibility/integrations/gotesting/testing_test.go
+++ b/internal/civisibility/integrations/gotesting/testing_test.go
@@ -7,76 +7,17 @@ package gotesting
import (
"fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "strconv"
+ "runtime"
+ "slices"
"testing"
- ddhttp "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
- ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
- "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/integrations"
"github.com/stretchr/testify/assert"
)
-var currentM *testing.M
-var mTracer mocktracer.Tracer
-
-// TestMain is the entry point for testing and runs before any test.
-func TestMain(m *testing.M) {
- currentM = m
- mTracer = integrations.InitializeCIVisibilityMock()
-
- // (*M)(m).Run() cast m to gotesting.M and just run
- // or use a helper method gotesting.RunM(m)
-
- // os.Exit((*M)(m).Run())
- _ = RunM(m)
-
- finishedSpans := mTracer.FinishedSpans()
- // 1 session span
- // 1 module span
- // 1 suite span (optional 1 from reflections_test.go)
- // 6 tests spans
- // 7 sub stest spans
- // 2 normal spans (from integration tests)
- // 1 benchmark span (optional - require the -bench option)
- if len(finishedSpans) < 17 {
- panic("expected at least 17 finished spans, got " + strconv.Itoa(len(finishedSpans)))
- }
-
- sessionSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSession)
- if len(sessionSpans) != 1 {
- panic("expected exactly 1 session span, got " + strconv.Itoa(len(sessionSpans)))
- }
-
- moduleSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestModule)
- if len(moduleSpans) != 1 {
- panic("expected exactly 1 module span, got " + strconv.Itoa(len(moduleSpans)))
- }
-
- suiteSpans := getSpansWithType(finishedSpans, constants.SpanTypeTestSuite)
- if len(suiteSpans) < 1 {
- panic("expected at least 1 suite span, got " + strconv.Itoa(len(suiteSpans)))
- }
-
- testSpans := getSpansWithType(finishedSpans, constants.SpanTypeTest)
- if len(testSpans) < 12 {
- panic("expected at least 12 suite span, got " + strconv.Itoa(len(testSpans)))
- }
-
- httpSpans := getSpansWithType(finishedSpans, ext.SpanTypeHTTP)
- if len(httpSpans) != 2 {
- panic("expected exactly 2 normal spans, got " + strconv.Itoa(len(httpSpans)))
- }
-
- os.Exit(0)
-}
-
// TestMyTest01 demonstrates instrumentation of InternalTests
func TestMyTest01(t *testing.T) {
assertTest(t)
@@ -107,106 +48,92 @@ func Test_Foo(gt *testing.T) {
assertTest(gt)
t := (*T)(gt)
var tests = []struct {
+ index byte
name string
input string
want string
}{
- {"yellow should return color", "yellow", "color"},
- {"banana should return fruit", "banana", "fruit"},
- {"duck should return animal", "duck", "animal"},
+ {1, "yellow should return color", "yellow", "color"},
+ {2, "banana should return fruit", "banana", "fruit"},
+ {3, "duck should return animal", "duck", "animal"},
}
+ buf := []byte{}
for _, test := range tests {
+ test := test
t.Run(test.name, func(t *testing.T) {
t.Log(test.name)
+ buf = append(buf, test.index)
})
}
+
+ expected := []byte{1, 2, 3}
+ if !slices.Equal(buf, expected) {
+ t.Error("error in subtests closure")
+ }
}
-// TestWithExternalCalls demonstrates testing with external HTTP calls.
-func TestWithExternalCalls(gt *testing.T) {
+// TestSkip demonstrates skipping a test with a message.
+func TestSkip(gt *testing.T) {
assertTest(gt)
- t := (*T)(gt)
-
- // Create a new HTTP test server
- s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write([]byte("Hello World"))
- }))
- defer s.Close()
-
- t.Run("default", func(t *testing.T) {
- // if we want to use the test span as a parent of a child span
- // we can extract the SpanContext and use it in other integrations
- ctx := (*T)(t).Context()
+ t := (*T)(gt)
- // Wrap the default HTTP transport for tracing
- rt := ddhttp.WrapRoundTripper(http.DefaultTransport)
- client := &http.Client{
- Transport: rt,
- }
+ // because we use the instrumented Skip
+ // the message will be reported as the skip reason.
+ t.Skip("Nothing to do here, skipping!")
+}
- // Create a new HTTP request
- req, err := http.NewRequest("GET", s.URL+"/hello/world", nil)
- if err != nil {
- t.FailNow()
- }
+// Tests for test retries feature
- // Use the span context here so the http span will appear as a child of the test
- req = req.WithContext(ctx)
+var testRetryWithPanicRunNumber = 0
- res, err := client.Do(req)
- if err != nil {
- t.FailNow()
+func TestRetryWithPanic(t *testing.T) {
+ t.Cleanup(func() {
+ if testRetryWithPanicRunNumber == 1 {
+ fmt.Println("CleanUp from the initial execution")
+ } else {
+ fmt.Println("CleanUp from the retry")
}
- _ = res.Body.Close()
})
+ testRetryWithPanicRunNumber++
+ if testRetryWithPanicRunNumber < 4 {
+ panic("Test Panic")
+ }
+}
- t.Run("custom-name", func(t *testing.T) {
-
- // we can also add custom tags to the test span by retrieving the
- // context and call the `ddtracer.SpanFromContext` api
- ctx := (*T)(t).Context()
- span, _ := ddtracer.SpanFromContext(ctx)
-
- // Custom namer function for the HTTP request
- customNamer := func(req *http.Request) string {
- value := fmt.Sprintf("%s %s", req.Method, req.URL.Path)
-
- // Then we can set custom tags to that test span
- span.SetTag("customNamer.Value", value)
- return value
- }
-
- rt := ddhttp.WrapRoundTripper(http.DefaultTransport, ddhttp.RTWithResourceNamer(customNamer))
- client := &http.Client{
- Transport: rt,
- }
-
- req, err := http.NewRequest("GET", s.URL+"/hello/world", nil)
- if err != nil {
- t.FailNow()
- }
-
- // Use the span context here so the http span will appear as a child of the test
- req = req.WithContext(ctx)
+var testRetryWithFailRunNumber = 0
- res, err := client.Do(req)
- if err != nil {
- t.FailNow()
+func TestRetryWithFail(t *testing.T) {
+ t.Cleanup(func() {
+ if testRetryWithFailRunNumber == 1 {
+ fmt.Println("CleanUp from the initial execution")
+ } else {
+ fmt.Println("CleanUp from the retry")
}
- _ = res.Body.Close()
})
+ testRetryWithFailRunNumber++
+ if testRetryWithFailRunNumber < 4 {
+ t.Fatal("Failed due the wrong execution number")
+ }
}
-// TestSkip demonstrates skipping a test with a message.
-func TestSkip(gt *testing.T) {
- assertTest(gt)
+func TestRetryAlwaysFail(t *testing.T) {
+ t.Parallel()
+ t.Fatal("Always fail to test the auto retries feature")
+}
- t := (*T)(gt)
+func TestNormalPassingAfterRetryAlwaysFail(t *testing.T) {}
- // because we use the instrumented Skip
- // the message will be reported as the skip reason.
- t.Skip("Nothing to do here, skipping!")
+var run int
+
+func TestEarlyFlakeDetection(t *testing.T) {
+ run++
+ fmt.Printf(" Run: %d", run)
+ if run%2 == 0 {
+ fmt.Println(" Failed")
+ t.FailNow()
+ }
+ fmt.Println(" Passed")
}
// BenchmarkFirst demonstrates benchmark instrumentation with sub-benchmarks.
@@ -315,8 +242,9 @@ func assertCommon(assert *assert.Assertions, span mocktracer.Span) {
spanTags := span.Tags()
assert.Subset(spanTags, map[string]interface{}{
- constants.Origin: constants.CIAppTestOrigin,
- constants.TestType: constants.TestTypeTest,
+ constants.Origin: constants.CIAppTestOrigin,
+ constants.TestType: constants.TestTypeTest,
+ constants.LogicalCPUCores: float64(runtime.NumCPU()),
})
assert.Contains(spanTags, ext.ResourceName)
@@ -340,14 +268,3 @@ func assertCommon(assert *assert.Assertions, span mocktracer.Span) {
}
assert.Contains(spanTags, constants.CIWorkspacePath)
}
-
-func getSpansWithType(spans []mocktracer.Span, spanType string) []mocktracer.Span {
- var result []mocktracer.Span
- for _, span := range spans {
- if span.Tag(ext.SpanType) == spanType {
- result = append(result, span)
- }
- }
-
- return result
-}
diff --git a/internal/civisibility/integrations/manual_api.go b/internal/civisibility/integrations/manual_api.go
index 276898ad1a..2261233422 100644
--- a/internal/civisibility/integrations/manual_api.go
+++ b/internal/civisibility/integrations/manual_api.go
@@ -211,6 +211,15 @@ func fillCommonTags(opts []tracer.StartSpanOption) []tracer.StartSpanOption {
// Apply CI tags
for k, v := range utils.GetCITags() {
+ // Ignore the test session name (sent at the payload metadata level, see `civisibility_payload.go`)
+ if k == constants.TestSessionName {
+ continue
+ }
+ opts = append(opts, tracer.Tag(k, v))
+ }
+
+ // Apply CI metrics
+ for k, v := range utils.GetCIMetrics() {
opts = append(opts, tracer.Tag(k, v))
}
diff --git a/internal/civisibility/integrations/manual_api_ddtest.go b/internal/civisibility/integrations/manual_api_ddtest.go
index cdb01f4d4b..5ca025a783 100644
--- a/internal/civisibility/integrations/manual_api_ddtest.go
+++ b/internal/civisibility/integrations/manual_api_ddtest.go
@@ -8,6 +8,9 @@ package integrations
import (
"context"
"fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
"runtime"
"strings"
"time"
@@ -134,11 +137,58 @@ func (t *tslvTest) SetTestFunc(fn *runtime.Func) {
return
}
- file, line := fn.FileLine(fn.Entry())
- file = utils.GetRelativePathFromCITagsSourceRoot(file)
+ // let's get the file path and the start line of the function
+ absolutePath, startLine := fn.FileLine(fn.Entry())
+ file := utils.GetRelativePathFromCITagsSourceRoot(absolutePath)
t.SetTag(constants.TestSourceFile, file)
- t.SetTag(constants.TestSourceStartLine, line)
+ t.SetTag(constants.TestSourceStartLine, startLine)
+
+ // now, let's try to get the end line of the function using ast
+ // parse the entire file where the function is defined to create an abstract syntax tree (AST)
+ // if we can't parse the file (source code is not available) we silently bail out
+ fset := token.NewFileSet()
+ fileNode, err := parser.ParseFile(fset, absolutePath, nil, parser.AllErrors)
+ if err == nil {
+ // get the function name without the package name
+ fullName := fn.Name()
+ firstDot := strings.LastIndex(fullName, ".") + 1
+ name := fullName[firstDot:]
+
+ // variable to store the ending line of the function
+ var endLine int
+ // traverse the AST to find the function declaration for the target function
+ ast.Inspect(fileNode, func(n ast.Node) bool {
+ // check if the current node is a function declaration
+ if funcDecl, ok := n.(*ast.FuncDecl); ok {
+ // if the function name matches the target function name
+ if funcDecl.Name.Name == name {
+ // get the line number of the end of the function body
+ endLine = fset.Position(funcDecl.Body.End()).Line
+ // stop further inspection since we have found the target function
+ return false
+ }
+ }
+ // check if the current node is a function literal (FuncLit)
+ if funcLit, ok := n.(*ast.FuncLit); ok {
+ // get the line number of the start of the function literal
+ funcStartLine := fset.Position(funcLit.Body.Pos()).Line
+ // if the start line matches the known start line, record the end line
+ if funcStartLine == startLine {
+ endLine = fset.Position(funcLit.Body.End()).Line
+ return false // stop further inspection since we have found the function
+ }
+ }
+ // continue inspecting other nodes
+ return true
+ })
+
+ // only tag the end line if it is at or after the known start line (endLine stays 0 when not found)
+ if endLine >= startLine {
+ t.SetTag(constants.TestSourceEndLine, endLine)
+ }
+ }
+ // get the codeowner of the function
codeOwners := utils.GetCodeOwners()
if codeOwners != nil {
match, found := codeOwners.Match("/" + file)
diff --git a/internal/civisibility/integrations/manual_api_ddtestsession.go b/internal/civisibility/integrations/manual_api_ddtestsession.go
index 807cd835da..64b522b3b2 100644
--- a/internal/civisibility/integrations/manual_api_ddtestsession.go
+++ b/internal/civisibility/integrations/manual_api_ddtestsession.go
@@ -9,8 +9,6 @@ import (
"context"
"fmt"
"os"
- "path/filepath"
- "regexp"
"strings"
"time"
@@ -37,23 +35,11 @@ type tslvTestSession struct {
// CreateTestSession initializes a new test session. It automatically determines the command and working directory.
func CreateTestSession() DdTestSession {
- var cmd string
- if len(os.Args) == 1 {
- cmd = filepath.Base(os.Args[0])
- } else {
- cmd = fmt.Sprintf("%s %s ", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " "))
- }
-
- // Filter out some parameters to make the command more stable.
- cmd = regexp.MustCompile(`(?si)-test.gocoverdir=(.*)\s`).ReplaceAllString(cmd, "")
- cmd = regexp.MustCompile(`(?si)-test.v=(.*)\s`).ReplaceAllString(cmd, "")
- cmd = regexp.MustCompile(`(?si)-test.testlogfile=(.*)\s`).ReplaceAllString(cmd, "")
- cmd = strings.TrimSpace(cmd)
wd, err := os.Getwd()
if err == nil {
wd = utils.GetRelativePathFromCITagsSourceRoot(wd)
}
- return CreateTestSessionWith(cmd, wd, "", time.Now())
+ return CreateTestSessionWith(utils.GetCITags()[constants.TestCommand], wd, "", time.Now())
}
// CreateTestSessionWith initializes a new test session with specified command, working directory, framework, and start time.
diff --git a/internal/civisibility/integrations/manual_api_mocktracer_test.go b/internal/civisibility/integrations/manual_api_mocktracer_test.go
index afb6d321e4..ba27d37249 100644
--- a/internal/civisibility/integrations/manual_api_mocktracer_test.go
+++ b/internal/civisibility/integrations/manual_api_mocktracer_test.go
@@ -248,6 +248,39 @@ func Test(t *testing.T) {
test.Close(ResultStatusSkip)
}
+func TestWithInnerFunc(t *testing.T) {
+ mockTracer.Reset()
+ assert := assert.New(t)
+
+ now := time.Now()
+ session, module, suite, test := createDDTest(now)
+ defer func() {
+ session.Close(0)
+ module.Close()
+ suite.Close()
+ }()
+ test.SetError(errors.New("we keep the last error"))
+ test.SetErrorInfo("my-type", "my-message", "my-stack")
+ func() {
+ pc, _, _, _ := runtime.Caller(0)
+ test.SetTestFunc(runtime.FuncForPC(pc))
+ }()
+
+ assert.NotNil(test.Context())
+ assert.Equal("my-test", test.Name())
+ assert.Equal(now, test.StartTime())
+ assert.Equal(suite, test.Suite())
+
+ test.Close(ResultStatusPass)
+
+ finishedSpans := mockTracer.FinishedSpans()
+ assert.Equal(1, len(finishedSpans))
+ testAssertions(assert, now, finishedSpans[0])
+
+ //no-op call
+ test.Close(ResultStatusSkip)
+}
+
func testAssertions(assert *assert.Assertions, now time.Time, testSpan mocktracer.Span) {
assert.Equal(now, testSpan.StartTime())
assert.Equal("my-module-framework.test", testSpan.OperationName())
@@ -272,6 +305,12 @@ func testAssertions(assert *assert.Assertions, now time.Time, testSpan mocktrace
assert.Contains(spanTags, constants.TestModuleIDTag)
assert.Contains(spanTags, constants.TestSuiteIDTag)
assert.Contains(spanTags, constants.TestSourceFile)
+
+ // make sure we have both start and end line
assert.Contains(spanTags, constants.TestSourceStartLine)
+ assert.Contains(spanTags, constants.TestSourceEndLine)
+ // make sure the startLine < endLine
+ assert.Less(spanTags[constants.TestSourceStartLine].(int), spanTags[constants.TestSourceEndLine].(int))
+
commonAssertions(assert, testSpan)
}
diff --git a/internal/civisibility/utils/ci_providers.go b/internal/civisibility/utils/ci_providers.go
index bb8b6940d5..17f0f304ad 100644
--- a/internal/civisibility/utils/ci_providers.go
+++ b/internal/civisibility/utils/ci_providers.go
@@ -14,6 +14,7 @@ import (
"strings"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
// providerType defines a function type that returns a map of string key-value pairs.
@@ -77,6 +78,14 @@ func getProviderTags() map[string]string {
}
}
+ if log.DebugEnabled() {
+ if providerName, ok := tags[constants.CIProviderName]; ok {
+ log.Debug("civisibility: detected ci provider: %v", providerName)
+ } else {
+ log.Debug("civisibility: no ci provider was detected.")
+ }
+ }
+
return tags
}
@@ -399,6 +408,38 @@ func extractGithubActions() map[string]string {
tags[constants.CIEnvVars] = string(jsonString)
}
+ // Extract PR information from the github event json file
+ eventFilePath := os.Getenv("GITHUB_EVENT_PATH")
+ if stats, err := os.Stat(eventFilePath); err == nil && !stats.IsDir() {
+ if eventFile, err := os.Open(eventFilePath); err == nil {
+ defer eventFile.Close()
+
+ var eventJson struct {
+ PullRequest struct {
+ Base struct {
+ Sha string `json:"sha"`
+ Ref string `json:"ref"`
+ } `json:"base"`
+ Head struct {
+ Sha string `json:"sha"`
+ } `json:"head"`
+ } `json:"pull_request"`
+ }
+
+ eventDecoder := json.NewDecoder(eventFile)
+ if eventDecoder.Decode(&eventJson) == nil {
+ tags[constants.GitHeadCommit] = eventJson.PullRequest.Head.Sha
+ tags[constants.GitPrBaseCommit] = eventJson.PullRequest.Base.Sha
+ tags[constants.GitPrBaseBranch] = eventJson.PullRequest.Base.Ref
+ }
+ }
+ }
+
+ // Fallback if GitPrBaseBranch is not set
+ if tmpVal, ok := tags[constants.GitPrBaseBranch]; !ok || tmpVal == "" {
+ tags[constants.GitPrBaseBranch] = os.Getenv("GITHUB_BASE_REF")
+ }
+
return tags
}
diff --git a/internal/civisibility/utils/ci_providers_test.go b/internal/civisibility/utils/ci_providers_test.go
index b85acc1efc..8aeed064e7 100644
--- a/internal/civisibility/utils/ci_providers_test.go
+++ b/internal/civisibility/utils/ci_providers_test.go
@@ -13,6 +13,8 @@ import (
"path/filepath"
"strings"
"testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
)
func setEnvs(t *testing.T, env map[string]string) {
@@ -114,3 +116,43 @@ func TestTags(t *testing.T) {
})
}
}
+
+func TestGitHubEventFile(t *testing.T) {
+ originalEventPath := os.Getenv("GITHUB_EVENT_PATH")
+ originalBaseRef := os.Getenv("GITHUB_BASE_REF")
+ defer func() {
+ os.Setenv("GITHUB_EVENT_PATH", originalEventPath)
+ os.Setenv("GITHUB_BASE_REF", originalBaseRef)
+ }()
+
+ os.Unsetenv("GITHUB_EVENT_PATH")
+ os.Unsetenv("GITHUB_BASE_REF")
+
+ checkValue := func(tags map[string]string, key, expectedValue string) {
+ if tags[key] != expectedValue {
+ t.Fatalf("Key: %s, the actual value (%s) is different to the expected value (%s)", key, tags[key], expectedValue)
+ }
+ }
+
+ t.Run("with event file", func(t *testing.T) {
+ eventFile := "testdata/fixtures/github-event.json"
+ t.Setenv("GITHUB_EVENT_PATH", eventFile)
+ t.Setenv("GITHUB_BASE_REF", "my-base-ref") // this should be ignored in favor of the event file value
+
+ tags := extractGithubActions()
+ expectedHeadCommit := "df289512a51123083a8e6931dd6f57bb3883d4c4"
+ expectedBaseCommit := "52e0974c74d41160a03d59ddc73bb9f5adab054b"
+ expectedBaseRef := "main"
+
+ checkValue(tags, constants.GitHeadCommit, expectedHeadCommit)
+ checkValue(tags, constants.GitPrBaseCommit, expectedBaseCommit)
+ checkValue(tags, constants.GitPrBaseBranch, expectedBaseRef)
+ })
+
+ t.Run("no event file", func(t *testing.T) {
+ t.Setenv("GITHUB_BASE_REF", "my-base-ref") // with no event file present, this env var is the fallback base branch
+
+ tags := extractGithubActions()
+ checkValue(tags, constants.GitPrBaseBranch, "my-base-ref")
+ })
+}
diff --git a/internal/civisibility/utils/codeowners.go b/internal/civisibility/utils/codeowners.go
index fa930d5af2..0674945151 100644
--- a/internal/civisibility/utils/codeowners.go
+++ b/internal/civisibility/utils/codeowners.go
@@ -76,6 +76,9 @@ func GetCodeOwners() *CodeOwners {
if _, err := os.Stat(path); err == nil {
codeowners, err = NewCodeOwners(path)
if err == nil {
+ if logger.DebugEnabled() {
+ logger.Debug("civisibility: codeowner file '%v' was loaded successfully.", path)
+ }
return codeowners
}
logger.Debug("Error parsing codeowners: %s", err)
diff --git a/internal/civisibility/utils/environmentTags.go b/internal/civisibility/utils/environmentTags.go
index 0b64b8be86..8904596e59 100644
--- a/internal/civisibility/utils/environmentTags.go
+++ b/internal/civisibility/utils/environmentTags.go
@@ -6,11 +6,16 @@
package utils
import (
+ "fmt"
+ "os"
"path/filepath"
+ "regexp"
"runtime"
+ "strings"
"sync"
"gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
"gopkg.in/DataDog/dd-trace-go.v1/internal/osinfo"
)
@@ -18,6 +23,10 @@ var (
// ciTags holds the CI/CD environment variable information.
ciTags map[string]string
ciTagsMutex sync.Mutex
+
+ // ciMetrics holds the CI/CD environment numeric variable information
+ ciMetrics map[string]float64
+ ciMetricsMutex sync.Mutex
)
// GetCITags retrieves and caches the CI/CD tags from environment variables.
@@ -38,6 +47,24 @@ func GetCITags() map[string]string {
return ciTags
}
+// GetCIMetrics retrieves and caches the CI/CD metrics from environment variables.
+// It initializes the ciMetrics map if it is not already initialized.
+// This function is thread-safe due to the use of a mutex.
+//
+// Returns:
+//
+// A map[string]float64 containing the CI/CD metrics.
+func GetCIMetrics() map[string]float64 {
+ ciMetricsMutex.Lock()
+ defer ciMetricsMutex.Unlock()
+
+ if ciMetrics == nil {
+ ciMetrics = createCIMetricsMap()
+ }
+
+ return ciMetrics
+}
+
// GetRelativePathFromCITagsSourceRoot calculates the relative path from the CI workspace root to the specified path.
// If the CI workspace root is not available in the tags, it returns the original path.
//
@@ -68,12 +95,44 @@ func GetRelativePathFromCITagsSourceRoot(path string) string {
// A map[string]string containing the extracted CI/CD tags.
func createCITagsMap() map[string]string {
localTags := getProviderTags()
+
+ // Populate runtime values
localTags[constants.OSPlatform] = runtime.GOOS
localTags[constants.OSVersion] = osinfo.OSVersion()
localTags[constants.OSArchitecture] = runtime.GOARCH
localTags[constants.RuntimeName] = runtime.Compiler
localTags[constants.RuntimeVersion] = runtime.Version()
+ log.Debug("civisibility: os platform: %v", runtime.GOOS)
+ log.Debug("civisibility: os architecture: %v", runtime.GOARCH)
+ log.Debug("civisibility: runtime version: %v", runtime.Version())
+ // Get command line test command
+ var cmd string
+ if len(os.Args) == 1 {
+ cmd = filepath.Base(os.Args[0])
+ } else {
+ cmd = fmt.Sprintf("%s %s ", filepath.Base(os.Args[0]), strings.Join(os.Args[1:], " "))
+ }
+
+ // Filter out some parameters to make the command more stable.
+ cmd = regexp.MustCompile(`(?si)-test.gocoverdir=(.*)\s`).ReplaceAllString(cmd, "")
+ cmd = regexp.MustCompile(`(?si)-test.v=(.*)\s`).ReplaceAllString(cmd, "")
+ cmd = regexp.MustCompile(`(?si)-test.testlogfile=(.*)\s`).ReplaceAllString(cmd, "")
+ cmd = strings.TrimSpace(cmd)
+ localTags[constants.TestCommand] = cmd
+ log.Debug("civisibility: test command: %v", cmd)
+
+ // Populate the test session name
+ if testSessionName, ok := os.LookupEnv(constants.CIVisibilityTestSessionNameEnvironmentVariable); ok {
+ localTags[constants.TestSessionName] = testSessionName
+ } else if jobName, ok := localTags[constants.CIJobName]; ok {
+ localTags[constants.TestSessionName] = fmt.Sprintf("%s-%s", jobName, cmd)
+ } else {
+ localTags[constants.TestSessionName] = cmd
+ }
+ log.Debug("civisibility: test session name: %v", localTags[constants.TestSessionName])
+
+ // Populate missing git data
gitData, _ := getLocalGitData()
// Populate Git metadata from the local Git repository if not already present in localTags
@@ -115,5 +174,20 @@ func createCITagsMap() map[string]string {
}
}
+ log.Debug("civisibility: workspace directory: %v", localTags[constants.CIWorkspacePath])
+ log.Debug("civisibility: common tags created with %v items", len(localTags))
return localTags
}
+
+// createCIMetricsMap creates a map of CI/CD tags by extracting information from environment variables and runtime information.
+//
+// Returns:
+//
+// A map[string]float64 containing the metrics extracted
+func createCIMetricsMap() map[string]float64 {
+ localMetrics := make(map[string]float64)
+ localMetrics[constants.LogicalCPUCores] = float64(runtime.NumCPU())
+
+ log.Debug("civisibility: common metrics created with %v items", len(localMetrics))
+ return localMetrics
+}
diff --git a/internal/civisibility/utils/environmentTags_test.go b/internal/civisibility/utils/environmentTags_test.go
index 694a2b1217..3baf20a87e 100644
--- a/internal/civisibility/utils/environmentTags_test.go
+++ b/internal/civisibility/utils/environmentTags_test.go
@@ -25,6 +25,18 @@ func TestGetCITagsCache(t *testing.T) {
assert.Equal(t, "newvalue", tags["key"])
}
+func TestGetCIMetricsCache(t *testing.T) {
+ ciMetrics = map[string]float64{"key": float64(1)}
+
+ // First call to initialize ciMetrics
+ tags := GetCIMetrics()
+ assert.Equal(t, float64(1), tags["key"])
+
+ tags["key"] = float64(42)
+ tags = GetCIMetrics()
+ assert.Equal(t, float64(42), tags["key"])
+}
+
func TestGetRelativePathFromCITagsSourceRoot(t *testing.T) {
ciTags = map[string]string{constants.CIWorkspacePath: "/ci/workspace"}
absPath := "/ci/workspace/subdir/file.txt"
diff --git a/internal/civisibility/utils/git.go b/internal/civisibility/utils/git.go
index fb62a37567..6e5284f584 100644
--- a/internal/civisibility/utils/git.go
+++ b/internal/civisibility/utils/git.go
@@ -7,13 +7,22 @@ package utils
import (
"errors"
+ "fmt"
+ "os"
"os/exec"
+ "path/filepath"
"regexp"
"strconv"
"strings"
+ "sync"
"time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
+// MaxPackFileSizeInMb is the maximum size of a pack file in megabytes.
+const MaxPackFileSizeInMb = 3
+
// localGitData holds various pieces of information about the local Git repository,
// including the source root, repository URL, branch, commit SHA, author and committer details, and commit message.
type localGitData struct {
@@ -30,8 +39,69 @@ type localGitData struct {
CommitMessage string
}
-// regexpSensitiveInfo is a regular expression used to match and filter out sensitive information from URLs.
-var regexpSensitiveInfo = regexp.MustCompile("(https?://|ssh?://)[^/]*@")
+var (
+ // regexpSensitiveInfo is a regular expression used to match and filter out sensitive information from URLs.
+ regexpSensitiveInfo = regexp.MustCompile("(https?://|ssh?://)[^/]*@")
+
+ // isGitFoundValue is a boolean flag indicating whether the Git executable is available on the system.
+ isGitFoundValue bool
+
+ // gitFinder is a sync.Once instance used to ensure that the Git executable is only checked once.
+ gitFinder sync.Once
+)
+
+// isGitFound checks if the Git executable is available on the system.
+func isGitFound() bool {
+ gitFinder.Do(func() {
+ _, err := exec.LookPath("git")
+ isGitFoundValue = err == nil
+ if err != nil {
+ log.Debug("civisibility.git: git executable not found")
+ }
+ })
+ return isGitFoundValue
+}
+
+// execGit executes a Git command with the given arguments.
+func execGit(args ...string) ([]byte, error) {
+ if !isGitFound() {
+ return nil, errors.New("git executable not found")
+ }
+ return exec.Command("git", args...).CombinedOutput()
+}
+
+// execGitString executes a Git command with the given arguments and returns the output as a string.
+func execGitString(args ...string) (string, error) {
+ out, err := execGit(args...)
+ strOut := strings.TrimSpace(strings.Trim(string(out), "\n"))
+ return strOut, err
+}
+
+// execGitStringWithInput executes a Git command with the given input and arguments and returns the output as a string.
+func execGitStringWithInput(input string, args ...string) (string, error) {
+ cmd := exec.Command("git", args...)
+ cmd.Stdin = strings.NewReader(input)
+ out, err := cmd.CombinedOutput()
+ strOut := strings.TrimSpace(strings.Trim(string(out), "\n"))
+ return strOut, err
+}
+
+// getGitVersion retrieves the version of the Git executable installed on the system.
+func getGitVersion() (major int, minor int, patch int, error error) {
+ out, err := execGitString("--version")
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ out = strings.TrimSpace(strings.ReplaceAll(out, "git version ", ""))
+ versionParts := strings.Split(out, ".")
+ if len(versionParts) < 3 {
+ return 0, 0, 0, errors.New("invalid git version")
+ }
+ major, _ = strconv.Atoi(versionParts[0])
+ minor, _ = strconv.Atoi(versionParts[1])
+ patch, _ = strconv.Atoi(versionParts[2])
+ return major, minor, patch, nil
+}
// getLocalGitData retrieves information about the local Git repository from the current HEAD.
// It gathers details such as the repository URL, current branch, latest commit SHA, author and committer details, and commit message.
@@ -43,32 +113,40 @@ var regexpSensitiveInfo = regexp.MustCompile("(https?://|ssh?://)[^/]*@")
func getLocalGitData() (localGitData, error) {
gitData := localGitData{}
+ if !isGitFound() {
+ return gitData, errors.New("git executable not found")
+ }
+
// Extract the absolute path to the Git directory
- out, err := exec.Command("git", "rev-parse", "--absolute-git-dir").Output()
+ log.Debug("civisibility.git: getting the absolute path to the Git directory")
+ out, err := execGitString("rev-parse", "--absolute-git-dir")
if err == nil {
- gitData.SourceRoot = strings.ReplaceAll(strings.Trim(string(out), "\n"), ".git", "")
+ gitData.SourceRoot = strings.ReplaceAll(out, ".git", "")
}
// Extract the repository URL
- out, err = exec.Command("git", "ls-remote", "--get-url").Output()
+ log.Debug("civisibility.git: getting the repository URL")
+ out, err = execGitString("ls-remote", "--get-url")
if err == nil {
- gitData.RepositoryURL = filterSensitiveInfo(strings.Trim(string(out), "\n"))
+ gitData.RepositoryURL = filterSensitiveInfo(out)
}
// Extract the current branch name
- out, err = exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output()
+ log.Debug("civisibility.git: getting the current branch name")
+ out, err = execGitString("rev-parse", "--abbrev-ref", "HEAD")
if err == nil {
- gitData.Branch = strings.Trim(string(out), "\n")
+ gitData.Branch = out
}
// Get commit details from the latest commit using git log (git log -1 --pretty='%H","%aI","%an","%ae","%cI","%cn","%ce","%B')
- out, err = exec.Command("git", "log", "-1", "--pretty=%H\",\"%at\",\"%an\",\"%ae\",\"%ct\",\"%cn\",\"%ce\",\"%B").Output()
+ log.Debug("civisibility.git: getting the latest commit details")
+ out, err = execGitString("log", "-1", "--pretty=%H\",\"%at\",\"%an\",\"%ae\",\"%ct\",\"%cn\",\"%ce\",\"%B")
if err != nil {
return gitData, err
}
// Split the output into individual components
- outArray := strings.Split(string(out), "\",\"")
+ outArray := strings.Split(out, "\",\"")
if len(outArray) < 8 {
return gitData, errors.New("git log failed")
}
@@ -89,6 +167,133 @@ func getLocalGitData() (localGitData, error) {
return gitData, nil
}
+// GetLastLocalGitCommitShas retrieves the commit SHAs of the last 1000 commits in the local Git repository.
+func GetLastLocalGitCommitShas() []string {
+ // git log --format=%H -n 1000 --since=\"1 month ago\"
+ log.Debug("civisibility.git: getting the commit SHAs of the last 1000 commits in the local Git repository")
+ out, err := execGitString("log", "--format=%H", "-n", "1000", "--since=\"1 month ago\"")
+ if err != nil || out == "" {
+ return []string{}
+ }
+ return strings.Split(out, "\n")
+}
+
+// UnshallowGitRepository converts a shallow clone into a complete clone by fetching all missing commits without git content (only commits and tree objects).
+func UnshallowGitRepository() (bool, error) {
+
+ // let's do a first check to see if the repository is a shallow clone
+ log.Debug("civisibility.unshallow: checking if the repository is a shallow clone")
+ isAShallowClone, err := isAShallowCloneRepository()
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error checking if the repository is a shallow clone: %s", err.Error())
+ }
+
+ // if the git repo is not a shallow clone, we can return early
+ if !isAShallowClone {
+ log.Debug("civisibility.unshallow: the repository is not a shallow clone")
+ return false, nil
+ }
+
+ // the git repo is a shallow clone, we need to double check if there are more than just 1 commit in the logs.
+ log.Debug("civisibility.unshallow: the repository is a shallow clone, checking if there are more than one commit in the logs")
+ hasMoreThanOneCommits, err := hasTheGitLogHaveMoreThanOneCommits()
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error checking if the git log has more than one commit: %s", err.Error())
+ }
+
+ // if there are more than 1 commits, we can return early
+ if hasMoreThanOneCommits {
+ log.Debug("civisibility.unshallow: the git log has more than one commits")
+ return false, nil
+ }
+
+ // let's check the git version >= 2.27.0 (git --version) to see if we can unshallow the repository
+ log.Debug("civisibility.unshallow: checking the git version")
+ major, minor, patch, err := getGitVersion()
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error getting the git version: %s", err.Error())
+ }
+ log.Debug("civisibility.unshallow: git version: %v.%v.%v", major, minor, patch)
+ if major < 2 || (major == 2 && minor < 27) {
+ log.Debug("civisibility.unshallow: the git version is less than 2.27.0 we cannot unshallow the repository")
+ return false, nil
+ }
+
+ // after asking for 2 logs lines, if the git log command returns just one commit sha, we reconfigure the repo
+ // to ask for git commits and trees of the last month (no blobs)
+
+ // let's get the origin name (git config --default origin --get clone.defaultRemoteName)
+ originName, err := execGitString("config", "--default", "origin", "--get", "clone.defaultRemoteName")
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error getting the origin name: %s\n%s", err.Error(), originName)
+ }
+ if originName == "" {
+ // if the origin name is empty, we fallback to "origin"
+ originName = "origin"
+ }
+ log.Debug("civisibility.unshallow: origin name: %v", originName)
+
+ // let's get the sha of the HEAD (git rev-parse HEAD)
+ headSha, err := execGitString("rev-parse", "HEAD")
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error getting the HEAD sha: %s\n%s", err.Error(), headSha)
+ }
+ if headSha == "" {
+ // if the HEAD is empty, we fallback to the current branch (git branch --show-current)
+ headSha, err = execGitString("branch", "--show-current")
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error getting the current branch: %s\n%s", err.Error(), headSha)
+ }
+ }
+ log.Debug("civisibility.unshallow: HEAD sha: %v", headSha)
+
+ // let's fetch the missing commits and trees from the last month
+ // git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse HEAD)
+ log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month")
+ fetchOutput, err := execGitString("fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", originName, headSha)
+
+ // let's check if the last command was unsuccessful
+ if err != nil || fetchOutput == "" {
+ log.Debug("civisibility.unshallow: error fetching the missing commits and trees from the last month: %v", err)
+ // ***
+ // The previous command has a drawback: if the local HEAD is a commit that has not been pushed to the remote, it will fail.
+ // If this is the case, we fallback to: `git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse --abbrev-ref --symbolic-full-name @{upstream})`
+ // This command will attempt to use the tracked branch for the current branch in order to unshallow.
+ // ***
+
+ // let's get the remote branch name: git rev-parse --abbrev-ref --symbolic-full-name @{upstream}
+ var remoteBranchName string
+ log.Debug("civisibility.unshallow: getting the remote branch name")
+ remoteBranchName, err = execGitString("rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{upstream}")
+ if err == nil {
+ // let's try the alternative: git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName) $(git rev-parse --abbrev-ref --symbolic-full-name @{upstream})
+ log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month using the remote branch name")
+ fetchOutput, err = execGitString("fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", originName, remoteBranchName)
+ }
+ }
+
+ // let's check if the last command was unsuccessful
+ if err != nil || fetchOutput == "" {
+ log.Debug("civisibility.unshallow: error fetching the missing commits and trees from the last month: %v", err)
+ // ***
+ // It could be that the CI is working on a detached HEAD or maybe branch tracking hasn’t been set up.
+ // In that case, this command will also fail, and we will finally fallback to we just unshallow all the things:
+ // `git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName)`
+ // ***
+
+ // let's try the last fallback: git fetch --shallow-since="1 month ago" --update-shallow --filter="blob:none" --recurse-submodules=no $(git config --default origin --get clone.defaultRemoteName)
+ log.Debug("civisibility.unshallow: fetching the missing commits and trees from the last month using the origin name")
+ fetchOutput, err = execGitString("fetch", "--shallow-since=\"1 month ago\"", "--update-shallow", "--filter=blob:none", "--recurse-submodules=no", originName)
+ }
+
+ if err != nil {
+ return false, fmt.Errorf("civisibility.unshallow: error: %s\n%s", err.Error(), fetchOutput)
+ }
+
+ log.Debug("civisibility.unshallow: was completed successfully")
+ return true, nil
+}
+
// filterSensitiveInfo removes sensitive information from a given URL using a regular expression.
// It replaces the user credentials part of the URL (if present) with an empty string.
//
@@ -102,3 +307,87 @@ func getLocalGitData() (localGitData, error) {
func filterSensitiveInfo(url string) string {
return string(regexpSensitiveInfo.ReplaceAll([]byte(url), []byte("$1"))[:])
}
+
+// isAShallowCloneRepository checks if the local Git repository is a shallow clone.
+func isAShallowCloneRepository() (bool, error) {
+ // git rev-parse --is-shallow-repository
+ out, err := execGitString("rev-parse", "--is-shallow-repository")
+ if err != nil {
+ return false, err
+ }
+
+ return strings.TrimSpace(out) == "true", nil
+}
+
+// hasTheGitLogHaveMoreThanOneCommits checks if the local Git repository has more than one commit.
+func hasTheGitLogHaveMoreThanOneCommits() (bool, error) {
+ // git log --format=oneline -n 2
+ out, err := execGitString("log", "--format=oneline", "-n", "2")
+ if err != nil || out == "" {
+ return false, err
+ }
+
+ commitsCount := strings.Count(out, "\n") + 1
+ return commitsCount > 1, nil
+}
+
+// getObjectsSha get the objects shas from the git repository based on the commits to include and exclude
+func getObjectsSha(commitsToInclude []string, commitsToExclude []string) []string {
+ // git rev-list --objects --no-object-names --filter=blob:none --since="1 month ago" HEAD " + string.Join(" ", commitsToExclude.Select(c => "^" + c)) + " " + string.Join(" ", commitsToInclude);
+ commitsToExcludeArgs := make([]string, len(commitsToExclude))
+ for i, c := range commitsToExclude {
+ commitsToExcludeArgs[i] = "^" + c
+ }
+ args := append([]string{"rev-list", "--objects", "--no-object-names", "--filter=blob:none", "--since=\"1 month ago\"", "HEAD"}, append(commitsToExcludeArgs, commitsToInclude...)...)
+ out, err := execGitString(args...)
+ if err != nil {
+ return []string{}
+ }
+ return strings.Split(out, "\n")
+}
+
+func CreatePackFiles(commitsToInclude []string, commitsToExclude []string) []string {
+ // get the objects shas to send
+ objectsShas := getObjectsSha(commitsToInclude, commitsToExclude)
+ if len(objectsShas) == 0 {
+ log.Debug("civisibility: no objects found to send")
+ return nil
+ }
+
+ // create the objects shas string
+ var objectsShasString string
+ for _, objectSha := range objectsShas {
+ objectsShasString += objectSha + "\n"
+ }
+
+ // get a temporary path to store the pack files
+ temporaryPath, err := os.MkdirTemp("", "pack-objects")
+ if err != nil {
+ log.Warn("civisibility: error creating temporary directory: %s", err)
+ return nil
+ }
+
+ // git pack-objects --compression=9 --max-pack-size={MaxPackFileSizeInMb}m "{temporaryPath}"
+ out, err := execGitStringWithInput(objectsShasString,
+ "pack-objects", "--compression=9", "--max-pack-size="+strconv.Itoa(MaxPackFileSizeInMb)+"m", temporaryPath+"/")
+ if err != nil {
+ log.Warn("civisibility: error creating pack files: %s", err)
+ return nil
+ }
+
+ // construct the full path to the pack files
+ var packFiles []string
+ for i, packFile := range strings.Split(out, "\n") {
+ file := filepath.Join(temporaryPath, fmt.Sprintf("-%s.pack", packFile))
+
+ // check if the pack file exists
+ if _, err := os.Stat(file); os.IsNotExist(err) {
+ log.Warn("civisibility: pack file not found: %s", packFiles[i])
+ continue
+ }
+
+ packFiles = append(packFiles, file)
+ }
+
+ return packFiles
+}
diff --git a/internal/civisibility/utils/git_test.go b/internal/civisibility/utils/git_test.go
index fd91ebee11..c3729b0317 100644
--- a/internal/civisibility/utils/git_test.go
+++ b/internal/civisibility/utils/git_test.go
@@ -60,3 +60,13 @@ func TestGetLocalGitData(t *testing.T) {
assert.NotEmpty(t, data.CommitterDate)
assert.NotEmpty(t, data.CommitMessage)
}
+
+func TestGetLastLocalGitCommitShas(t *testing.T) {
+ shas := GetLastLocalGitCommitShas()
+ assert.NotEmpty(t, shas)
+}
+
+func TestUnshallowGitRepository(t *testing.T) {
+ _, err := UnshallowGitRepository()
+ assert.NoError(t, err)
+}
diff --git a/internal/civisibility/utils/home.go b/internal/civisibility/utils/home.go
index 8625010feb..fd2000c893 100644
--- a/internal/civisibility/utils/home.go
+++ b/internal/civisibility/utils/home.go
@@ -13,6 +13,8 @@ import (
"runtime"
"strconv"
"strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
)
// This code is based on: https://github.com/mitchellh/go-homedir/blob/v1.1.0/homedir.go (MIT License)
@@ -55,7 +57,11 @@ func ExpandPath(path string) string {
// Returns:
//
// The home directory of the current user.
-func getHomeDir() string {
+func getHomeDir() (homeDir string) {
+ defer func() {
+ log.Debug("civisibility: home directory: %v", homeDir)
+ }()
+
if runtime.GOOS == "windows" {
if home := os.Getenv("HOME"); home != "" {
// First prefer the HOME environment variable
diff --git a/internal/civisibility/utils/names.go b/internal/civisibility/utils/names.go
index 1edcb2b972..94eacdb189 100644
--- a/internal/civisibility/utils/names.go
+++ b/internal/civisibility/utils/names.go
@@ -10,9 +10,15 @@ import (
"fmt"
"path/filepath"
"runtime"
+ "slices"
"strings"
)
+var (
+ // ignoredFunctionsFromStackTrace array with functions we want to ignore on the final stacktrace (because doesn't add anything useful)
+ ignoredFunctionsFromStackTrace = []string{"runtime.gopanic", "runtime.panicmem", "runtime.sigpanic"}
+)
+
// GetModuleAndSuiteName extracts the module name and suite name from a given program counter (pc).
// This function utilizes runtime.FuncForPC to retrieve the full function name associated with the
// program counter, then splits the string to separate the package name from the function name.
@@ -72,6 +78,11 @@ func GetStacktrace(skip int) string {
buffer := new(bytes.Buffer)
for {
if frame, ok := frames.Next(); ok {
+ // let's check if we need to ignore this frame
+ if slices.Contains(ignoredFunctionsFromStackTrace, frame.Function) {
+ continue
+ }
+ // writing frame to the buffer
_, _ = fmt.Fprintf(buffer, "%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
} else {
break
diff --git a/internal/civisibility/utils/net/client.go b/internal/civisibility/utils/net/client.go
new file mode 100644
index 0000000000..ca62998759
--- /dev/null
+++ b/internal/civisibility/utils/net/client.go
@@ -0,0 +1,235 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "math/rand/v2"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/constants"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/civisibility/utils"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
+const (
+ DefaultMaxRetries int = 5
+ DefaultBackoff time.Duration = 150 * time.Millisecond
+)
+
+type (
+ Client interface {
+ GetSettings() (*SettingsResponseData, error)
+ GetEarlyFlakeDetectionData() (*EfdResponseData, error)
+ GetCommits(localCommits []string) ([]string, error)
+ SendPackFiles(commitSha string, packFiles []string) (bytes int64, err error)
+ }
+
+ client struct {
+ id string
+ agentless bool
+ baseURL string
+ environment string
+ serviceName string
+ workingDirectory string
+ repositoryURL string
+ commitSha string
+ branchName string
+ testConfigurations testConfigurations
+ headers map[string]string
+ handler *RequestHandler
+ }
+
+ testConfigurations struct {
+ OsPlatform string `json:"os.platform,omitempty"`
+ OsVersion string `json:"os.version,omitempty"`
+ OsArchitecture string `json:"os.architecture,omitempty"`
+ RuntimeName string `json:"runtime.name,omitempty"`
+ RuntimeArchitecture string `json:"runtime.architecture,omitempty"`
+ RuntimeVersion string `json:"runtime.version,omitempty"`
+ Custom map[string]string `json:"custom,omitempty"`
+ }
+)
+
+var _ Client = &client{}
+
+func NewClientWithServiceName(serviceName string) Client {
+ ciTags := utils.GetCITags()
+
+ // get the environment
+ environment := os.Getenv("DD_ENV")
+ if environment == "" {
+ environment = "none"
+ }
+
+ // get the service name
+ if serviceName == "" {
+ serviceName = os.Getenv("DD_SERVICE")
+ if serviceName == "" {
+ if repoURL, ok := ciTags[constants.GitRepositoryURL]; ok {
+ // regex to sanitize the repository url to be used as a service name
+ repoRegex := regexp.MustCompile(`(?m)/([a-zA-Z0-9\-_.]*)$`)
+ matches := repoRegex.FindStringSubmatch(repoURL)
+ if len(matches) > 1 {
+ repoURL = strings.TrimSuffix(matches[1], ".git")
+ }
+ serviceName = repoURL
+ }
+ }
+ }
+
+ // get all custom configuration (test.configuration.*)
+ var customConfiguration map[string]string
+ if v := os.Getenv("DD_TAGS"); v != "" {
+ prefix := "test.configuration."
+ for k, v := range internal.ParseTagString(v) {
+ if strings.HasPrefix(k, prefix) {
+ if customConfiguration == nil {
+ customConfiguration = map[string]string{}
+ }
+
+ customConfiguration[strings.TrimPrefix(k, prefix)] = v
+ }
+ }
+ }
+
+ // create default http headers and get base url
+ defaultHeaders := map[string]string{}
+ var baseURL string
+ var requestHandler *RequestHandler
+
+ agentlessEnabled := internal.BoolEnv(constants.CIVisibilityAgentlessEnabledEnvironmentVariable, false)
+ if agentlessEnabled {
+ // Agentless mode is enabled.
+ APIKeyValue := os.Getenv(constants.APIKeyEnvironmentVariable)
+ if APIKeyValue == "" {
+ log.Error("An API key is required for agentless mode. Use the DD_API_KEY env variable to set it")
+ return nil
+ }
+
+ defaultHeaders["dd-api-key"] = APIKeyValue
+
+ // Check for a custom agentless URL.
+ agentlessURL := os.Getenv(constants.CIVisibilityAgentlessURLEnvironmentVariable)
+
+ if agentlessURL == "" {
+ // Use the standard agentless URL format.
+ site := "datadoghq.com"
+ if v := os.Getenv("DD_SITE"); v != "" {
+ site = v
+ }
+
+ baseURL = fmt.Sprintf("https://api.%s", site)
+ } else {
+ // Use the custom agentless URL.
+ baseURL = agentlessURL
+ }
+
+ requestHandler = NewRequestHandler()
+ } else {
+ // Use agent mode with the EVP proxy.
+ defaultHeaders["X-Datadog-EVP-Subdomain"] = "api"
+
+ agentURL := internal.AgentURLFromEnv()
+ if agentURL.Scheme == "unix" {
+ // If we're connecting over UDS we can just rely on the agent to provide the hostname
+ log.Debug("connecting to agent over unix, do not set hostname on any traces")
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ requestHandler = NewRequestHandlerWithClient(&http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return dialer.DialContext(ctx, "unix", (&net.UnixAddr{
+ Name: agentURL.Path,
+ Net: "unix",
+ }).String())
+ },
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ },
+ Timeout: 10 * time.Second,
+ })
+ agentURL = &url.URL{
+ Scheme: "http",
+ Host: fmt.Sprintf("UDS_%s", strings.NewReplacer(":", "_", "/", "_", `\`, "_").Replace(agentURL.Path)),
+ }
+ } else {
+ requestHandler = NewRequestHandler()
+ }
+
+ baseURL = agentURL.String()
+ }
+
+ // create random id (the backend associate all transactions with the client request)
+ id := fmt.Sprint(rand.Uint64() & math.MaxInt64)
+ defaultHeaders["trace_id"] = id
+ defaultHeaders["parent_id"] = id
+
+ log.Debug("ciVisibilityHttpClient: new client created [id: %v, agentless: %v, url: %v, env: %v, serviceName: %v]",
+ id, agentlessEnabled, baseURL, environment, serviceName)
+ return &client{
+ id: id,
+ agentless: agentlessEnabled,
+ baseURL: baseURL,
+ environment: environment,
+ serviceName: serviceName,
+ workingDirectory: ciTags[constants.CIWorkspacePath],
+ repositoryURL: ciTags[constants.GitRepositoryURL],
+ commitSha: ciTags[constants.GitCommitSHA],
+ branchName: ciTags[constants.GitBranch],
+ testConfigurations: testConfigurations{
+ OsPlatform: ciTags[constants.OSPlatform],
+ OsVersion: ciTags[constants.OSVersion],
+ OsArchitecture: ciTags[constants.OSArchitecture],
+ RuntimeName: ciTags[constants.RuntimeName],
+ RuntimeVersion: ciTags[constants.RuntimeVersion],
+ Custom: customConfiguration,
+ },
+ headers: defaultHeaders,
+ handler: requestHandler,
+ }
+}
+
+func NewClient() Client {
+ return NewClientWithServiceName("")
+}
+
+func (c *client) getURLPath(urlPath string) string {
+ if c.agentless {
+ return fmt.Sprintf("%s/%s", c.baseURL, urlPath)
+ }
+
+ return fmt.Sprintf("%s/%s/%s", c.baseURL, "evp_proxy/v2", urlPath)
+}
+
+func (c *client) getPostRequestConfig(url string, body interface{}) *RequestConfig {
+ return &RequestConfig{
+ Method: "POST",
+ URL: c.getURLPath(url),
+ Headers: c.headers,
+ Body: body,
+ Format: FormatJSON,
+ Compressed: false,
+ Files: nil,
+ MaxRetries: DefaultMaxRetries,
+ Backoff: DefaultBackoff,
+ }
+}
diff --git a/internal/civisibility/utils/net/client_test.go b/internal/civisibility/utils/net/client_test.go
new file mode 100644
index 0000000000..732e17d03a
--- /dev/null
+++ b/internal/civisibility/utils/net/client_test.go
@@ -0,0 +1,231 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func saveEnv() []string {
+ return os.Environ()
+}
+
+func restoreEnv(env []string) {
+ os.Clearenv()
+ for _, e := range env {
+ kv := strings.SplitN(e, "=", 2)
+ os.Setenv(kv[0], kv[1])
+ }
+}
+
// TestNewClient_DefaultValues checks NewClient with a scrubbed environment:
// environment defaults to "none" and agentless mode stays off.
func TestNewClient_DefaultValues(t *testing.T) {
	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	// PATH is preserved so any external tooling invoked during client
	// construction keeps working.
	os.Clearenv()
	os.Setenv("PATH", path)
	// Do not set any environment variables to simulate default behavior

	cInterface := NewClient()
	if cInterface == nil {
		t.Fatal("Expected non-nil client")
	}

	c, ok := cInterface.(*client)
	if !ok {
		t.Fatal("Expected client to be of type *client")
	}

	if c.environment != "none" {
		t.Errorf("Expected environment 'none', got '%s'", c.environment)
	}

	if c.agentless {
		t.Errorf("Expected agentless to be false")
	}

	// Since serviceName depends on CI tags, which we cannot mock without access to internal functions,
	// we check if serviceName is set or not empty.
	if c.serviceName == "" {
		t.Errorf("Expected serviceName to be set, got empty string")
	}
}
+
// TestNewClient_AgentlessEnabled checks that DD_CIVISIBILITY_AGENTLESS_ENABLED
// plus DD_API_KEY/DD_SITE switch the client into agentless mode, with the
// intake URL derived from the site and the API key in the default headers.
func TestNewClient_AgentlessEnabled(t *testing.T) {
	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	os.Clearenv()
	os.Setenv("PATH", path)
	os.Setenv("DD_CIVISIBILITY_AGENTLESS_ENABLED", "true")
	os.Setenv("DD_API_KEY", "test_api_key")
	os.Setenv("DD_SITE", "site.com")

	cInterface := NewClient()
	if cInterface == nil {
		t.Fatal("Expected non-nil client")
	}

	c, ok := cInterface.(*client)
	if !ok {
		t.Fatal("Expected client to be of type *client")
	}

	if !c.agentless {
		t.Errorf("Expected agentless to be true")
	}

	expectedBaseURL := "https://api.site.com"
	if c.baseURL != expectedBaseURL {
		t.Errorf("Expected baseUrl '%s', got '%s'", expectedBaseURL, c.baseURL)
	}

	if c.headers["dd-api-key"] != "test_api_key" {
		t.Errorf("Expected dd-api-key 'test_api_key', got '%s'", c.headers["dd-api-key"])
	}
}
+
// TestNewClient_AgentlessEnabledWithNoApiKey checks that agentless mode
// without an API key yields a nil client.
// NOTE(review): this assumes NewClient returns a literal nil interface (not a
// typed-nil *client) on failure — confirm in NewClientWithServiceName.
func TestNewClient_AgentlessEnabledWithNoApiKey(t *testing.T) {
	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	os.Clearenv()
	os.Setenv("PATH", path)
	os.Setenv("DD_CIVISIBILITY_AGENTLESS_ENABLED", "true")

	cInterface := NewClient()
	if cInterface != nil {
		t.Fatal("Expected nil client")
	}
}
+
// TestNewClient_CustomAgentlessURL checks that DD_CIVISIBILITY_AGENTLESS_URL
// overrides the site-derived intake URL.
func TestNewClient_CustomAgentlessURL(t *testing.T) {
	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	setCiVisibilityEnv(path, "https://custom.agentless.url")

	cInterface := NewClient()
	if cInterface == nil {
		t.Fatal("Expected non-nil client")
	}

	c, ok := cInterface.(*client)
	if !ok {
		t.Fatal("Expected client to be of type *client")
	}

	if c.baseURL != "https://custom.agentless.url" {
		t.Errorf("Expected baseUrl 'https://custom.agentless.url', got '%s'", c.baseURL)
	}
}
+
+func TestClient_getUrlPath_Agentless(t *testing.T) {
+ c := &client{
+ agentless: true,
+ baseURL: "https://api.customhost.com",
+ }
+
+ urlPath := c.getURLPath("some/path")
+ expected := "https://api.customhost.com/some/path"
+ if urlPath != expected {
+ t.Errorf("Expected urlPath '%s', got '%s'", expected, urlPath)
+ }
+}
+
+func TestClient_getUrlPath_Agent(t *testing.T) {
+ c := &client{
+ agentless: false,
+ baseURL: "http://agent.url",
+ }
+
+ urlPath := c.getURLPath("some/path")
+ expected := "http://agent.url/evp_proxy/v2/some/path"
+ if urlPath != expected {
+ t.Errorf("Expected urlPath '%s', got '%s'", expected, urlPath)
+ }
+}
+
// TestClient_getPostRequestConfig verifies every field of the RequestConfig
// built for a JSON POST through the agent proxy: method, proxied URL, shared
// headers, JSON format, no compression, and default retry policy.
func TestClient_getPostRequestConfig(t *testing.T) {
	c := &client{
		agentless: false,
		baseURL:   "http://agent.url",
		headers: map[string]string{
			"trace_id":  "12345",
			"parent_id": "12345",
		},
	}

	body := map[string]string{"key": "value"}
	config := c.getPostRequestConfig("some/path", body)

	if config.Method != "POST" {
		t.Errorf("Expected Method 'POST', got '%s'", config.Method)
	}

	expectedURL := "http://agent.url/evp_proxy/v2/some/path"
	if config.URL != expectedURL {
		t.Errorf("Expected URL '%s', got '%s'", expectedURL, config.URL)
	}

	if !reflect.DeepEqual(config.Headers, c.headers) {
		t.Errorf("Headers do not match")
	}

	if config.Format != FormatJSON {
		t.Errorf("Expected Format 'FormatJSON', got '%v'", config.Format)
	}

	if config.Compressed {
		t.Errorf("Expected Compressed to be false")
	}

	if config.MaxRetries != DefaultMaxRetries {
		t.Errorf("Expected MaxRetries '%d', got '%d'", DefaultMaxRetries, config.MaxRetries)
	}

	if config.Backoff != DefaultBackoff {
		t.Errorf("Expected Backoff '%v', got '%v'", DefaultBackoff, config.Backoff)
	}
}
+
// TestNewClient_TestConfigurations verifies that the DD_TAGS entry
// "test.configuration.MyTag:MyValue" surfaces as Custom["MyTag"] == "MyValue"
// in the client's test configurations.
func TestNewClient_TestConfigurations(t *testing.T) {
	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	setCiVisibilityEnv(path, "https://custom.agentless.url")
	os.Setenv("DD_TAGS", "test.configuration.MyTag:MyValue")

	cInterface := NewClient()
	if cInterface == nil {
		t.Fatal("Expected non-nil client")
	}

	c, ok := cInterface.(*client)
	if !ok {
		t.Fatal("Expected client to be of type *client")
	}

	if c.testConfigurations.Custom["MyTag"] != "MyValue" {
		t.Errorf("Expected 'MyValue', got '%s'", c.testConfigurations.Custom["MyTag"])
	}
}
+
+func setCiVisibilityEnv(path string, url string) {
+ os.Clearenv()
+ os.Setenv("PATH", path)
+ os.Setenv("DD_CIVISIBILITY_AGENTLESS_ENABLED", "true")
+ os.Setenv("DD_API_KEY", "test_api_key")
+ os.Setenv("DD_CIVISIBILITY_AGENTLESS_URL", url)
+}
diff --git a/internal/civisibility/utils/net/efd_api.go b/internal/civisibility/utils/net/efd_api.go
new file mode 100644
index 0000000000..5e2ad94547
--- /dev/null
+++ b/internal/civisibility/utils/net/efd_api.go
@@ -0,0 +1,77 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "fmt"
+)
+
const (
	// efdRequestType is the JSON-API "type" value for EFD requests.
	efdRequestType string = "ci_app_libraries_tests_request"
	// efdURLPath is the backend endpoint (relative, no leading slash) for
	// early flake detection data.
	efdURLPath string = "api/v2/ci/libraries/tests"
)
+
type (
	// efdRequest is the JSON-API envelope POSTed to the EFD endpoint.
	efdRequest struct {
		Data efdRequestHeader `json:"data"`
	}

	// efdRequestHeader carries the request id, JSON-API type, and attributes.
	efdRequestHeader struct {
		ID         string         `json:"id"`
		Type       string         `json:"type"`
		Attributes EfdRequestData `json:"attributes"`
	}

	// EfdRequestData identifies which service/env/repository and test
	// configurations to fetch known tests for.
	EfdRequestData struct {
		Service        string             `json:"service"`
		Env            string             `json:"env"`
		RepositoryURL  string             `json:"repository_url"`
		Configurations testConfigurations `json:"configurations"`
	}

	// efdResponse is the JSON-API envelope returned by the backend.
	efdResponse struct {
		Data struct {
			ID         string          `json:"id"`
			Type       string          `json:"type"`
			Attributes EfdResponseData `json:"attributes"`
		} `json:"data"`
	}

	// EfdResponseData holds the known tests returned by the backend.
	EfdResponseData struct {
		Tests EfdResponseDataModules `json:"tests"`
	}

	// EfdResponseDataModules maps module name -> suites.
	EfdResponseDataModules map[string]EfdResponseDataSuites
	// EfdResponseDataSuites maps suite name -> test names.
	EfdResponseDataSuites map[string][]string
)
+
// GetEarlyFlakeDetectionData queries the backend for the known tests
// (module -> suite -> test names) used by early flake detection.
//
// It POSTs a JSON-API payload built from the client's service, environment,
// repository URL, and test configurations, and returns the decoded response
// attributes, or an error if the request or the decoding fails.
func (c *client) GetEarlyFlakeDetectionData() (*EfdResponseData, error) {
	body := efdRequest{
		Data: efdRequestHeader{
			ID:   c.id,
			Type: efdRequestType,
			Attributes: EfdRequestData{
				Service:        c.serviceName,
				Env:            c.environment,
				RepositoryURL:  c.repositoryURL,
				Configurations: c.testConfigurations,
			},
		},
	}

	response, err := c.handler.SendRequest(*c.getPostRequestConfig(efdURLPath, body))
	if err != nil {
		return nil, fmt.Errorf("sending early flake detection request: %s", err.Error())
	}

	var responseObject efdResponse
	err = response.Unmarshal(&responseObject)
	if err != nil {
		return nil, fmt.Errorf("unmarshalling early flake detection data response: %s", err.Error())
	}

	return &responseObject.Data.Attributes, nil
}
diff --git a/internal/civisibility/utils/net/efd_api_test.go b/internal/civisibility/utils/net/efd_api_test.go
new file mode 100644
index 0000000000..93008d25ce
--- /dev/null
+++ b/internal/civisibility/utils/net/efd_api_test.go
@@ -0,0 +1,106 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// TestEfdApiRequest spins up a mock backend, issues an EFD request through
// the real client, and verifies both the outgoing JSON-API payload and the
// decoded response attributes.
func TestEfdApiRequest(t *testing.T) {
	var c *client
	expectedResponse := efdResponse{}
	// NOTE(review): settingsRequestType looks like a copy-paste from the
	// settings API test; the value is never asserted below, but confirm
	// whether an EFD-specific response type was intended here.
	expectedResponse.Data.Type = settingsRequestType
	expectedResponse.Data.Attributes.Tests = EfdResponseDataModules{
		"MyModule1": EfdResponseDataSuites{
			"MySuite1": []string{"Test1", "Test2"},
		},
		"MyModule2": EfdResponseDataSuites{
			"MySuite2": []string{"Test3", "Test4"},
		},
	}

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "failed to read body", http.StatusBadRequest)
			return
		}

		if r.Header.Get(HeaderContentType) == ContentTypeJSON {
			var request efdRequest
			json.Unmarshal(body, &request)
			assert.Equal(t, c.id, request.Data.ID)
			assert.Equal(t, efdRequestType, request.Data.Type)
			assert.Equal(t, efdURLPath, r.URL.Path[1:])
			assert.Equal(t, c.environment, request.Data.Attributes.Env)
			assert.Equal(t, c.repositoryURL, request.Data.Attributes.RepositoryURL)
			assert.Equal(t, c.serviceName, request.Data.Attributes.Service)
			assert.Equal(t, c.testConfigurations, request.Data.Attributes.Configurations)

			// Echo the request id back so the envelopes match.
			w.Header().Set(HeaderContentType, ContentTypeJSON)
			expectedResponse.Data.ID = request.Data.ID
			json.NewEncoder(w).Encode(expectedResponse)
		}
	}))
	defer server.Close()

	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	setCiVisibilityEnv(path, server.URL)

	cInterface := NewClient()
	c = cInterface.(*client)
	efdData, err := cInterface.GetEarlyFlakeDetectionData()
	assert.Nil(t, err)
	assert.Equal(t, expectedResponse.Data.Attributes, *efdData)
}
+
// TestEfdApiRequestFailToUnmarshal checks that a non-retriable 400 response
// (below the >=406 retry threshold) is surfaced as an unmarshalling error.
func TestEfdApiRequestFailToUnmarshal(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "failed to read body", http.StatusBadRequest)
	}))
	defer server.Close()

	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	setCiVisibilityEnv(path, server.URL)

	cInterface := NewClient()
	efdData, err := cInterface.GetEarlyFlakeDetectionData()
	assert.Nil(t, efdData)
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "cannot unmarshal response")
}
+
// TestEfdApiRequestFailToGet checks that persistent 5xx responses exhaust the
// retry budget and surface as a send error.
func TestEfdApiRequestFailToGet(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "internal processing error", http.StatusInternalServerError)
	}))
	defer server.Close()

	origEnv := saveEnv()
	path := os.Getenv("PATH")
	defer restoreEnv(origEnv)

	setCiVisibilityEnv(path, server.URL)

	cInterface := NewClient()
	efdData, err := cInterface.GetEarlyFlakeDetectionData()
	assert.Nil(t, efdData)
	assert.NotNil(t, err)
	assert.Contains(t, err.Error(), "sending early flake detection request")
}
diff --git a/internal/civisibility/utils/net/http.go b/internal/civisibility/utils/net/http.go
new file mode 100644
index 0000000000..4176cd895a
--- /dev/null
+++ b/internal/civisibility/utils/net/http.go
@@ -0,0 +1,398 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "mime"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+)
+
// Constants for common strings: MIME content types, HTTP header names, the
// 429 status checked for rate limiting, and the serialization-format sentinel
// used by RequestConfig/Response.
const (
	ContentTypeJSON            = "application/json"
	ContentTypeJSONAlternative = "application/vnd.api+json"
	ContentTypeOctetStream     = "application/octet-stream"
	ContentEncodingGzip        = "gzip"
	HeaderContentType          = "Content-Type"
	HeaderContentEncoding      = "Content-Encoding"
	HeaderAcceptEncoding       = "Accept-Encoding"
	HeaderRateLimitReset       = "x-ratelimit-reset"
	HTTPStatusTooManyRequests  = 429
	FormatJSON                 = "json"
)
+
// FormFile represents a file to be uploaded in a multipart form request.
type FormFile struct {
	FieldName   string      // The name of the form field
	FileName    string      // The name of the file (omitted from the part header when empty)
	Content     interface{} // The content of the file ([]byte, map, struct, etc.); non-[]byte JSON content is serialized by prepareContent
	ContentType string      // The MIME type of the file (e.g., "application/json", "application/octet-stream")
}
+
// RequestConfig holds configuration for a request. MaxRetries and Backoff are
// replaced with package defaults by SendRequest when zero or negative.
type RequestConfig struct {
	Method     string            // HTTP method: GET or POST
	URL        string            // Request URL
	Headers    map[string]string // Additional HTTP headers
	Body       interface{}       // Request body for JSON, MessagePack, or raw bytes
	Format     string            // Format: "json" or "msgpack"
	Compressed bool              // Whether to use gzip compression
	Files      []FormFile        // Files to be uploaded in a multipart form data request (takes precedence over Body)
	MaxRetries int               // Maximum number of retries
	Backoff    time.Duration     // Initial backoff duration for retries
}
+
// Response represents the HTTP response with deserialization capabilities and status code.
type Response struct {
	Body         []byte // Response body in raw format (already gzip-decompressed when the server sent gzip)
	Format       string // Format of the response ("json" or "unknown")
	StatusCode   int    // HTTP status code
	CanUnmarshal bool   // Whether the response body can be unmarshalled (set for 2xx status codes)
}
+
+// Unmarshal deserializes the response body into the provided target based on the response format.
+func (r *Response) Unmarshal(target interface{}) error {
+ if !r.CanUnmarshal {
+ return fmt.Errorf("cannot unmarshal response with status code %d", r.StatusCode)
+ }
+
+ switch r.Format {
+ case FormatJSON:
+ return json.Unmarshal(r.Body, target)
+ default:
+ return fmt.Errorf("unsupported format '%s' for unmarshalling", r.Format)
+ }
+}
+
// RequestHandler handles HTTP requests with retries and different formats.
type RequestHandler struct {
	Client *http.Client // Underlying HTTP client used for every attempt
}
+
+// NewRequestHandler creates a new RequestHandler with a default HTTP client.
+func NewRequestHandler() *RequestHandler {
+ return &RequestHandler{
+ Client: &http.Client{
+ Timeout: 45 * time.Second, // Customize timeout as needed
+ },
+ }
+}
+
+// NewRequestHandlerWithClient creates a new RequestHandler with a custom http.Client
+func NewRequestHandlerWithClient(client *http.Client) *RequestHandler {
+ return &RequestHandler{
+ Client: client,
+ }
+}
+
// SendRequest sends an HTTP request based on the provided configuration.
//
// Missing MaxRetries/Backoff are replaced with package defaults; Method and
// URL are mandatory. internalSendRequest is invoked once per attempt (it
// sleeps internally between retryable attempts) until it reports a terminal
// result or the retry budget is exhausted.
func (rh *RequestHandler) SendRequest(config RequestConfig) (*Response, error) {
	if config.MaxRetries <= 0 {
		config.MaxRetries = DefaultMaxRetries // Default retries
	}
	if config.Backoff <= 0 {
		config.Backoff = DefaultBackoff // Default backoff
	}
	if config.Method == "" {
		return nil, errors.New("HTTP method is required")
	}
	if config.URL == "" {
		return nil, errors.New("URL is required")
	}

	for attempt := 0; attempt <= config.MaxRetries; attempt++ {
		log.Debug("ciVisibilityHttpClient: new request [method: %v, url: %v, attempt: %v, maxRetries: %v]",
			config.Method, config.URL, attempt, config.MaxRetries)
		stopRetries, rs, err := rh.internalSendRequest(&config, attempt)
		if stopRetries {
			return rs, err
		}
	}

	return nil, errors.New("max retries exceeded")
}
+
// internalSendRequest performs one attempt of the configured request.
//
// Return contract: when stopRetries is false the attempt failed in a
// retryable way and this function has already slept (exponential backoff or
// rate-limit wait); the caller should try again. When stopRetries is true,
// either response or requestError carries the terminal outcome.
func (rh *RequestHandler) internalSendRequest(config *RequestConfig, attempt int) (stopRetries bool, response *Response, requestError error) {
	var req *http.Request

	// Check if it's a multipart form data request
	if len(config.Files) > 0 {
		// Create multipart form data body
		body, contentType, err := createMultipartFormData(config.Files, config.Compressed)
		if err != nil {
			return true, nil, err
		}

		if log.DebugEnabled() {
			var files []string
			for _, f := range config.Files {
				files = append(files, f.FieldName)
			}
			log.Debug("ciVisibilityHttpClient: sending files %v", files)
		}
		req, err = http.NewRequest(config.Method, config.URL, bytes.NewBuffer(body))
		if err != nil {
			return true, nil, err
		}
		req.Header.Set(HeaderContentType, contentType)
		if config.Compressed {
			req.Header.Set(HeaderContentEncoding, ContentEncodingGzip)
		}
	} else if config.Body != nil {
		// Handle JSON body
		serializedBody, err := serializeData(config.Body, config.Format)
		if err != nil {
			return true, nil, err
		}

		if log.DebugEnabled() {
			log.Debug("ciVisibilityHttpClient: serialized body [compressed: %v] %v", config.Compressed, string(serializedBody))
		}

		// Compress body if needed
		if config.Compressed {
			serializedBody, err = compressData(serializedBody)
			if err != nil {
				return true, nil, err
			}
		}

		req, err = http.NewRequest(config.Method, config.URL, bytes.NewBuffer(serializedBody))
		if err != nil {
			return true, nil, err
		}
		if config.Format == FormatJSON {
			req.Header.Set(HeaderContentType, ContentTypeJSON)
		}
		if config.Compressed {
			req.Header.Set(HeaderContentEncoding, ContentEncodingGzip)
		}
	} else {
		// Handle requests without a body (e.g., GET requests)
		var err error
		req, err = http.NewRequest(config.Method, config.URL, nil)
		if err != nil {
			return true, nil, err
		}
	}

	// Set that is possible to handle gzip responses
	req.Header.Set(HeaderAcceptEncoding, ContentEncodingGzip)

	// Add custom headers if provided (these override any header set above)
	for key, value := range config.Headers {
		req.Header.Set(key, value)
	}

	resp, err := rh.Client.Do(req)
	if err != nil {
		log.Debug("ciVisibilityHttpClient: error [%v].", err)
		// Retry if there's an error
		exponentialBackoff(attempt, config.Backoff)
		return false, nil, nil
	}
	// Close response body
	defer resp.Body.Close()

	// Capture the status code
	statusCode := resp.StatusCode
	log.Debug("ciVisibilityHttpClient: response status code [%v]", resp.StatusCode)

	// Check for rate-limiting (HTTP 429)
	if resp.StatusCode == HTTPStatusTooManyRequests {
		rateLimitReset := resp.Header.Get(HeaderRateLimitReset)
		if rateLimitReset != "" {
			if resetTime, err := strconv.ParseInt(rateLimitReset, 10, 64); err == nil {
				var waitDuration time.Duration
				if resetTime > time.Now().Unix() {
					// Assume it's a Unix timestamp
					waitDuration = time.Until(time.Unix(resetTime, 0))
				} else {
					// Assume it's a duration in seconds
					waitDuration = time.Duration(resetTime) * time.Second
				}
				if waitDuration > 0 {
					time.Sleep(waitDuration)
				}
				return false, nil, nil
			}
		}

		// Fallback to exponential backoff if header is missing or invalid
		exponentialBackoff(attempt, config.Backoff)
		return false, nil, nil
	}

	// Check status code for retries
	if statusCode >= 406 {
		// Retry if the status code is >= 406; client errors below that
		// (e.g. 400/404) fall through and are returned as-is.
		exponentialBackoff(attempt, config.Backoff)
		return false, nil, nil
	}

	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return true, nil, err
	}

	// Decompress response if it is gzip compressed
	if resp.Header.Get(HeaderContentEncoding) == ContentEncodingGzip {
		responseBody, err = decompressData(responseBody)
		if err != nil {
			return true, nil, err
		}
	}

	// Determine response format from headers
	responseFormat := "unknown"
	mediaType, _, err := mime.ParseMediaType(resp.Header.Get(HeaderContentType))
	if err == nil {
		if mediaType == ContentTypeJSON || mediaType == ContentTypeJSONAlternative {
			responseFormat = FormatJSON
			if log.DebugEnabled() {
				log.Debug("ciVisibilityHttpClient: serialized response [%v]", string(responseBody))
			}
		}
	}

	// Determine if we can unmarshal based on status code (2xx)
	canUnmarshal := statusCode >= 200 && statusCode < 300

	// Return the successful response with status code and unmarshal capability
	return true, &Response{Body: responseBody, Format: responseFormat, StatusCode: statusCode, CanUnmarshal: canUnmarshal}, nil
}
+
+// Helper functions for data serialization, compression, and handling multipart form data
+
+// serializeData serializes the data based on the format.
+func serializeData(data interface{}, format string) ([]byte, error) {
+ switch v := data.(type) {
+ case []byte:
+ // If it's already a byte array, use it directly
+ return v, nil
+ default:
+ // Otherwise, serialize it according to the specified format
+ if format == FormatJSON {
+ return json.Marshal(data)
+ }
+ }
+ return nil, fmt.Errorf("unsupported format '%s' for data type '%T'", format, data)
+}
+
+// compressData compresses the data using gzip.
+func compressData(data []byte) ([]byte, error) {
+ if data == nil {
+ return nil, errors.New("attempt to compress a nil data array")
+ }
+
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ _, err := writer.Write(data)
+ if err != nil {
+ return nil, err
+ }
+ writer.Close()
+ return buf.Bytes(), nil
+}
+
+// decompressData decompresses gzip data.
+func decompressData(data []byte) ([]byte, error) {
+ reader, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create gzip reader: %v", err)
+ }
+ defer reader.Close()
+ decompressedData, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decompress data: %v", err)
+ }
+ return decompressedData, nil
+}
+
+// exponentialBackoff performs an exponential backoff with retries.
+func exponentialBackoff(retryCount int, initialDelay time.Duration) {
+ maxDelay := 30 * time.Second
+ delay := initialDelay * (1 << uint(retryCount)) // Exponential backoff
+ if delay > maxDelay {
+ delay = maxDelay
+ }
+ time.Sleep(delay)
+}
+
+// prepareContent prepares the content for a FormFile by serializing it if needed.
+func prepareContent(content interface{}, contentType string) ([]byte, error) {
+ if contentType == ContentTypeJSON {
+ return serializeData(content, FormatJSON)
+ } else if contentType == ContentTypeOctetStream {
+ // For binary data, ensure it's already in byte format
+ if data, ok := content.([]byte); ok {
+ return data, nil
+ }
+ return nil, errors.New("content must be []byte for octet-stream content type")
+ }
+ return nil, errors.New("unsupported content type for serialization")
+}
+
// createMultipartFormData creates a multipart form data request body with the given files.
// It also compresses the data using gzip if compression is enabled.
//
// It returns the (optionally gzip-compressed) body bytes and the multipart
// content type (including the boundary) to set on the request. When the body
// is compressed, the caller is responsible for also setting the gzip
// Content-Encoding header.
func createMultipartFormData(files []FormFile, compressed bool) ([]byte, string, error) {
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)

	for _, file := range files {
		partHeaders := textproto.MIMEHeader{}
		// Omit the filename attribute when no file name was provided.
		if file.FileName == "" {
			partHeaders.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"`, file.FieldName))
		} else {
			partHeaders.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, file.FieldName, file.FileName))
		}
		partHeaders.Set("Content-Type", file.ContentType)

		part, err := writer.CreatePart(partHeaders)
		if err != nil {
			return nil, "", err
		}

		// Prepare the file content (serialize if necessary based on content type)
		fileContent, err := prepareContent(file.Content, file.ContentType)
		if err != nil {
			return nil, "", err
		}

		if _, err := part.Write(fileContent); err != nil {
			return nil, "", err
		}
	}

	// Close the writer to set the terminating boundary
	err := writer.Close()
	if err != nil {
		return nil, "", err
	}

	// Compress the multipart form data if compression is enabled
	if compressed {
		compressedData, err := compressData(buf.Bytes())
		if err != nil {
			return nil, "", err
		}
		return compressedData, writer.FormDataContentType(), nil
	}

	return buf.Bytes(), writer.FormDataContentType(), nil
}
diff --git a/internal/civisibility/utils/net/http_test.go b/internal/civisibility/utils/net/http_test.go
new file mode 100644
index 0000000000..45a6c167bc
--- /dev/null
+++ b/internal/civisibility/utils/net/http_test.go
@@ -0,0 +1,896 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+const DefaultMultipartMemorySize = 10 << 20 // 10MB
+
+// Mock server handlers for different test scenarios
+
+// Mock server for basic JSON and MessagePack requests with gzip handling
+func mockJSONMsgPackHandler(w http.ResponseWriter, r *http.Request) {
+ var body []byte
+ var err error
+
+ // Check if the request is gzip compressed and decompress it
+ if r.Header.Get(HeaderContentEncoding) == ContentEncodingGzip {
+ gzipReader, err := gzip.NewReader(r.Body)
+ if err != nil {
+ http.Error(w, "failed to decompress gzip", http.StatusBadRequest)
+ return
+ }
+ defer gzipReader.Close()
+ body, err = io.ReadAll(gzipReader)
+ } else {
+ body, err = io.ReadAll(r.Body)
+ }
+
+ if err != nil {
+ http.Error(w, "failed to read body", http.StatusBadRequest)
+ return
+ }
+
+ // Process JSON based on Content-Type
+ if r.Header.Get(HeaderContentType) == ContentTypeJSON {
+ var data map[string]interface{}
+ json.Unmarshal(body, &data)
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ json.NewEncoder(w).Encode(map[string]interface{}{"received": data})
+ }
+}
+
// mockMultipartHandler is a mock server for multipart form data with gzip
// handling: it echoes each uploaded file's content back as a JSON map keyed
// by form field name.
func mockMultipartHandler(w http.ResponseWriter, r *http.Request) {
	var err error

	// Check if the request is gzip compressed and decompress it
	if r.Header.Get(HeaderContentEncoding) == ContentEncodingGzip {
		// NOTE: ":=" shadows the outer err here; harmless because it is
		// checked immediately within this block.
		gzipReader, err := gzip.NewReader(r.Body)
		if err != nil {
			http.Error(w, "failed to decompress gzip", http.StatusBadRequest)
			return
		}
		defer gzipReader.Close()

		// Replace the request body with the decompressed body for further processing
		r.Body = io.NopCloser(gzipReader)
	}

	// Parse multipart form data
	err = r.ParseMultipartForm(DefaultMultipartMemorySize)
	if err != nil {
		http.Error(w, "cannot parse multipart form", http.StatusBadRequest)
		return
	}

	// Errors are ignored below: test fixtures always provide readable files.
	response := make(map[string]string)
	for key := range r.MultipartForm.File {
		file, _, _ := r.FormFile(key)
		content, _ := io.ReadAll(file)
		response[key] = string(content)
	}

	w.Header().Set(HeaderContentType, ContentTypeJSON)
	json.NewEncoder(w).Encode(response)
}
+
// mockRateLimitHandler is a mock server that always answers 429 with an
// x-ratelimit-reset hint so retry timing is predictable in tests.
func mockRateLimitHandler(w http.ResponseWriter, _ *http.Request) {
	// Set the rate limit reset time to 2 seconds
	w.Header().Set(HeaderRateLimitReset, "2")
	http.Error(w, "Too Many Requests", HTTPStatusTooManyRequests)
}
+
+// Test Suite
+
// TestSendJSONRequest posts an uncompressed JSON body to the echo mock and
// verifies the round-tripped payload.
func TestSendJSONRequest(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
	defer server.Close()

	handler := NewRequestHandler()
	config := RequestConfig{
		Method:     "POST",
		URL:        server.URL,
		Body:       map[string]interface{}{"key": "value"},
		Format:     "json",
		Compressed: false,
	}

	response, err := handler.SendRequest(config)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	assert.Equal(t, "json", response.Format)

	var result map[string]interface{}
	err = response.Unmarshal(&result)
	assert.NoError(t, err)
	assert.Equal(t, "value", result["received"].(map[string]interface{})["key"])
}
+
// TestSendMultipartFormDataRequest uploads a JSON file and a binary file as
// multipart form data and verifies both contents are echoed back.
func TestSendMultipartFormDataRequest(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(mockMultipartHandler))
	defer server.Close()

	handler := NewRequestHandler()
	config := RequestConfig{
		Method: "POST",
		URL:    server.URL,
		Files: []FormFile{
			{
				FieldName:   "file1",
				FileName:    "test.json",
				Content:     map[string]interface{}{"key": "value"},
				ContentType: ContentTypeJSON,
			},
			{
				FieldName:   "file2",
				FileName:    "test.bin",
				Content:     []byte{0x01, 0x02, 0x03},
				ContentType: ContentTypeOctetStream,
			},
		},
	}

	response, err := handler.SendRequest(config)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	assert.Equal(t, "json", response.Format)

	var result map[string]interface{}
	err = response.Unmarshal(&result)
	assert.NoError(t, err)
	assert.Equal(t, `{"key":"value"}`, result["file1"])
	assert.Equal(t, "\x01\x02\x03", result["file2"])
}
+
// TestSendJSONRequestWithGzipCompression posts a gzip-compressed JSON body
// and verifies the mock decompresses and echoes it correctly.
func TestSendJSONRequestWithGzipCompression(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
	defer server.Close()

	handler := NewRequestHandler()
	config := RequestConfig{
		Method:     "POST",
		URL:        server.URL,
		Body:       map[string]interface{}{"key": "value"},
		Format:     "json",
		Compressed: true,
	}

	response, err := handler.SendRequest(config)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	assert.Equal(t, "json", response.Format)

	var result map[string]interface{}
	err = response.Unmarshal(&result)
	assert.NoError(t, err)
	assert.NotNil(t, result["received"], "Expected 'received' key to be present in the response")
	assert.Equal(t, "value", result["received"].(map[string]interface{})["key"])
}
+
// TestSendMultipartFormDataRequestWithGzipCompression uploads gzip-compressed
// multipart form data and verifies both file contents survive the round trip.
func TestSendMultipartFormDataRequestWithGzipCompression(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(mockMultipartHandler))
	defer server.Close()

	handler := NewRequestHandler()
	config := RequestConfig{
		Method: "POST",
		URL:    server.URL,
		Files: []FormFile{
			{
				FieldName:   "file1",
				FileName:    "test.json",
				Content:     map[string]interface{}{"key": "value"},
				ContentType: ContentTypeJSON,
			},
			{
				FieldName:   "file2",
				FileName:    "test.bin",
				Content:     []byte{0x01, 0x02, 0x03},
				ContentType: ContentTypeOctetStream,
			},
		},
		Compressed: true,
	}

	response, err := handler.SendRequest(config)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, response.StatusCode)
	assert.Equal(t, "json", response.Format)

	var result map[string]interface{}
	err = response.Unmarshal(&result)
	assert.NoError(t, err)
	assert.Equal(t, `{"key":"value"}`, result["file1"])
	assert.Equal(t, "\x01\x02\x03", result["file2"])
}
+
// TestRateLimitHandlingWithRetries drives SendRequest against an always-429
// server and checks the x-ratelimit-reset header is honored between attempts.
func TestRateLimitHandlingWithRetries(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(mockRateLimitHandler))
	defer server.Close()

	handler := NewRequestHandler()
	config := RequestConfig{
		Method:     "GET", // No body needed for GET
		URL:        server.URL,
		Compressed: true, // Enable gzip compression for GET
		MaxRetries: 2,
		Backoff:    1 * time.Second, // Exponential backoff fallback
	}

	start := time.Now()
	response, err := handler.SendRequest(config)
	elapsed := time.Since(start)

	// Since the rate limit is set to reset after 2 seconds, and we retry twice,
	// the minimum elapsed time should be at least 4 seconds (2s for each retry).
	// (MaxRetries=2 actually yields three attempts, so ~6s total; the 4s bound
	// is deliberately conservative.)
	assert.Error(t, err)
	assert.Nil(t, response)
	assert.True(t, elapsed >= 4*time.Second, "Expected at least 4 seconds due to rate limit retry delay")
}
+
// TestGzipDecompressionError feeds decompressData an invalid gzip payload and
// expects an error.
func TestGzipDecompressionError(t *testing.T) {
	// Simulate corrupted gzip data
	corruptedData := []byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00}

	_, err := decompressData(corruptedData)
	assert.Error(t, err)
}
+
// TestExponentialBackoffDelays measures that three backoff sleeps
// (1s + 2s + 4s) take at least 7 seconds in total.
func TestExponentialBackoffDelays(t *testing.T) {
	start := time.Now()

	// Simulate exponential backoff with 3 retries and 1-second initial delay
	for i := 0; i < 3; i++ {
		exponentialBackoff(i, 1*time.Second)
	}

	elapsed := time.Since(start)
	assert.True(t, elapsed >= 7*time.Second, "Expected at least 7 seconds due to exponential backoff")
}
+
// TestCreateMultipartFormDataWithUnsupportedContentType expects an error when
// a FormFile declares a content type prepareContent cannot serialize.
func TestCreateMultipartFormDataWithUnsupportedContentType(t *testing.T) {
	files := []FormFile{
		{
			FieldName:   "file1",
			FileName:    "test.unknown",
			Content:     map[string]interface{}{"key": "value"},
			ContentType: "unsupported/content-type", // Unsupported content type
		},
	}

	_, _, err := createMultipartFormData(files, false)
	assert.Error(t, err)
}
+
+func TestRateLimitHandlingWithoutResetHeader(t *testing.T) {
+ // Mock server without 'x-ratelimit-reset' header
+ mockRateLimitHandlerWithoutHeader := func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "Too Many Requests", HTTPStatusTooManyRequests)
+ }
+
+ server := httptest.NewServer(http.HandlerFunc(mockRateLimitHandlerWithoutHeader))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET", // No body needed for GET
+ URL: server.URL,
+ Compressed: false,
+ MaxRetries: 2,
+ Backoff: 1 * time.Second,
+ }
+
+ start := time.Now()
+ response, err := handler.SendRequest(config)
+ elapsed := time.Since(start)
+
+ // With exponential backoff fallback, the minimum elapsed time should be at least 3 seconds (1s + 2s)
+ assert.Error(t, err)
+ assert.Nil(t, response)
+ assert.True(t, elapsed >= 3*time.Second, "Expected at least 3 seconds due to exponential backoff delay")
+}
+
+func TestSendRequestWithInvalidURL(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: "http://[::1]:namedport", // Invalid URL
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Nil(t, response)
+}
+
+func TestSendEmptyBodyWithGzipCompression(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "POST",
+ URL: server.URL,
+ Body: nil, // Empty body
+ Format: "json",
+ Compressed: true,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+func TestCompressDataWithInvalidInput(t *testing.T) {
+	// Attempt to compress nil input; compressData should reject it with an error
+ _, err := compressData(nil)
+ assert.Error(t, err)
+}
+
+func TestSendPUTRequestWithJSONBody(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "PUT",
+ URL: server.URL,
+ Body: map[string]interface{}{"key": "value"},
+ Format: "json",
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+func TestSendDELETERequest(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "DELETE",
+ URL: server.URL,
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+func TestSendHEADRequest(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "HEAD",
+ URL: server.URL,
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+}
+
+func TestSendRequestWithCustomHeaders(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(mockJSONMsgPackHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ customHeaderKey := "X-Custom-Header"
+ customHeaderValue := "CustomValue"
+
+ config := RequestConfig{
+ Method: "POST",
+ URL: server.URL,
+ Headers: map[string]string{
+ customHeaderKey: customHeaderValue,
+ },
+ Body: map[string]interface{}{"key": "value"},
+ Format: "json",
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+
+ // Verify that the custom header was correctly set
+ assert.Equal(t, customHeaderValue, config.Headers[customHeaderKey])
+}
+
+func TestSendRequestWithTimeout(t *testing.T) {
+ // Mock server that delays response
+ mockSlowHandler := func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(5 * time.Second) // Delay longer than the client timeout
+ w.WriteHeader(http.StatusOK)
+ }
+
+ server := httptest.NewServer(http.HandlerFunc(mockSlowHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ handler.Client.Timeout = 2 * time.Second // Set client timeout to 2 seconds
+
+ config := RequestConfig{
+ Method: "GET",
+ URL: server.URL,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Nil(t, response)
+}
+
+func TestSendRequestWithMaxRetriesExceeded(t *testing.T) {
+ // Mock server that always returns a 500 error
+ mockAlwaysFailHandler := func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "Internal Server Error", http.StatusInternalServerError)
+ }
+
+ server := httptest.NewServer(http.HandlerFunc(mockAlwaysFailHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: server.URL,
+ Compressed: false,
+ MaxRetries: 2, // Only retry twice
+ Backoff: 500 * time.Millisecond,
+ }
+
+ start := time.Now()
+ response, err := handler.SendRequest(config)
+ elapsed := time.Since(start)
+
+ // Ensure retries were attempted
+ assert.Error(t, err)
+ assert.Nil(t, response)
+ assert.True(t, elapsed >= 1*time.Second, "Expected at least 1 second due to retry delay")
+}
+
+func TestGzipResponseDecompressionHandling(t *testing.T) {
+ // Mock server that returns a gzip-compressed response
+ mockGzipResponseHandler := func(w http.ResponseWriter, r *http.Request) {
+ originalResponse := `{"message": "Hello, Gzip!"}`
+ var buf bytes.Buffer
+ gzipWriter := gzip.NewWriter(&buf)
+ _, err := gzipWriter.Write([]byte(originalResponse))
+ if err != nil {
+ http.Error(w, "Failed to compress response", http.StatusInternalServerError)
+ return
+ }
+ gzipWriter.Close()
+
+ // Set headers and write compressed data
+ w.Header().Set(HeaderContentEncoding, ContentEncodingGzip)
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ w.Write(buf.Bytes())
+ }
+
+ server := httptest.NewServer(http.HandlerFunc(mockGzipResponseHandler))
+ defer server.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: server.URL,
+ Compressed: false, // Compression not needed for request, only testing response decompression
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, response.StatusCode)
+ assert.Equal(t, "json", response.Format)
+
+ // Check that the response body was correctly decompressed
+ var result map[string]string
+ err = response.Unmarshal(&result)
+ assert.NoError(t, err)
+ assert.Equal(t, "Hello, Gzip!", result["message"])
+}
+
+func TestSendRequestWithUnsupportedFormat(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "POST",
+ URL: "http://example.com",
+ Body: map[string]interface{}{"key": "value"},
+ Format: "unsupported_format", // Unsupported format
+ Compressed: false,
+ }
+
+ response, err := handler.SendRequest(config)
+ assert.Error(t, err) // Unsupported format error
+ assert.Nil(t, response)
+}
+
+func TestSendRequestWithInvalidMethod(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "",
+ URL: "http://example.com",
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.EqualError(t, err, "HTTP method is required")
+}
+
+func TestSendRequestWithEmptyURL(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: "",
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.EqualError(t, err, "URL is required")
+}
+
+func TestSendRequestWithNetworkError(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: "http://invalid-url",
+ Backoff: 10 * time.Millisecond,
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "max retries exceeded")
+}
+
+func TestSerializeNilDataToJSON(t *testing.T) {
+ data, err := serializeData(nil, FormatJSON)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("null"), data)
+}
+
+func TestCompressEmptyData(t *testing.T) {
+ data, err := compressData([]byte{})
+ assert.NoError(t, err)
+ assert.NotEmpty(t, data)
+}
+
+func TestDecompressValidGzipData(t *testing.T) {
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ writer.Write([]byte("test data"))
+ writer.Close()
+
+ data, err := decompressData(buf.Bytes())
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("test data"), data)
+}
+
+func TestExponentialBackoffWithNegativeRetryCount(t *testing.T) {
+ start := time.Now()
+ exponentialBackoff(-1, 100*time.Millisecond)
+ duration := time.Since(start)
+ assert.LessOrEqual(t, duration, 100*time.Millisecond)
+}
+
+func TestResponseUnmarshalWithUnsupportedFormat(t *testing.T) {
+ resp := &Response{
+ Body: []byte("data"),
+ Format: "unknown",
+ StatusCode: http.StatusOK,
+ CanUnmarshal: true,
+ }
+
+ var data interface{}
+ err := resp.Unmarshal(&data)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "unsupported format 'unknown'")
+}
+
+func TestSendRequestWithUnsupportedResponseFormat(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(HeaderContentType, "application/xml")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("test"))
+ }))
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ }
+
+ resp, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, "unknown", resp.Format)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.True(t, resp.CanUnmarshal)
+
+ var data interface{}
+ err = resp.Unmarshal(&data)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "unsupported format 'unknown'")
+}
+
+func TestPrepareContentWithNonByteContentForOctetStream(t *testing.T) {
+ _, err := prepareContent(12345, ContentTypeOctetStream)
+ assert.Error(t, err)
+ assert.EqualError(t, err, "content must be []byte for octet-stream content type")
+}
+
+func TestCreateMultipartFormDataWithCompression(t *testing.T) {
+ files := []FormFile{
+ {
+ FieldName: "file1",
+ FileName: "test.txt",
+ Content: []byte("test content"),
+ ContentType: ContentTypeOctetStream,
+ },
+ }
+
+ data, contentType, err := createMultipartFormData(files, true)
+ assert.NoError(t, err)
+ assert.Contains(t, contentType, "multipart/form-data; boundary=")
+ assert.NotEmpty(t, data)
+
+ // Decompress the data to verify the content
+ decompressedData, err := decompressData(data)
+ assert.NoError(t, err)
+ assert.Contains(t, string(decompressedData), "test content")
+}
+
+func TestSendRequestWithBodySerializationError(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "POST",
+ URL: "http://example.com",
+ Body: make(chan int), // Channels cannot be serialized
+ Format: FormatJSON,
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "unsupported type: chan int")
+}
+
+func TestSendRequestWithCompressedResponse(t *testing.T) {
+ // Server that returns a compressed response
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ w.Header().Set(HeaderContentEncoding, ContentEncodingGzip)
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ writer.Write([]byte(`{"message": "compressed response"}`))
+ writer.Close()
+ w.WriteHeader(http.StatusOK)
+ w.Write(buf.Bytes())
+ }))
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ Compressed: true,
+ }
+
+ resp, err := handler.SendRequest(config)
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.Equal(t, FormatJSON, resp.Format)
+ assert.True(t, resp.CanUnmarshal)
+
+ var data map[string]string
+ err = resp.Unmarshal(&data)
+ assert.NoError(t, err)
+ assert.Equal(t, "compressed response", data["message"])
+}
+
+func TestSendRequestWithRetryAfterHeader(t *testing.T) {
+ attempts := 0
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if attempts == 0 {
+ w.Header().Set(HeaderRateLimitReset, "1") // Wait 1 second
+ w.WriteHeader(HTTPStatusTooManyRequests)
+ attempts++
+ return
+ }
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`{"success": true}`))
+ }))
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ MaxRetries: 2,
+ Backoff: 100 * time.Millisecond,
+ }
+
+ start := time.Now()
+ resp, err := handler.SendRequest(config)
+ duration := time.Since(start)
+
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.True(t, resp.CanUnmarshal)
+ assert.GreaterOrEqual(t, duration, time.Second) // Ensures wait time was respected
+
+ var data map[string]bool
+ err = resp.Unmarshal(&data)
+ assert.NoError(t, err)
+ assert.True(t, data["success"])
+}
+
+func TestSendRequestWithInvalidRetryAfterHeader(t *testing.T) {
+ attempts := 0
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if attempts == 0 {
+ w.Header().Set(HeaderRateLimitReset, "invalid") // Invalid value
+ w.WriteHeader(HTTPStatusTooManyRequests)
+ attempts++
+ return
+ }
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`{"success": true}`))
+ }))
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ MaxRetries: 2,
+ Backoff: 100 * time.Millisecond,
+ }
+
+ start := time.Now()
+ resp, err := handler.SendRequest(config)
+ duration := time.Since(start)
+
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.True(t, resp.CanUnmarshal)
+ assert.GreaterOrEqual(t, duration, 100*time.Millisecond) // Backoff was used
+
+ var data map[string]bool
+ err = resp.Unmarshal(&data)
+ assert.NoError(t, err)
+ assert.True(t, data["success"])
+}
+
+func TestExponentialBackoffWithMaxDelay(t *testing.T) {
+ start := time.Now()
+ exponentialBackoff(10, 1*time.Second) // Should be limited to maxDelay (30s)
+ duration := time.Since(start)
+ assert.LessOrEqual(t, duration, 31*time.Second)
+}
+
+func TestSendRequestWithContextTimeout(t *testing.T) {
+ handler := &RequestHandler{
+ Client: &http.Client{
+ Timeout: 50 * time.Millisecond,
+ },
+ }
+
+ // Server that sleeps longer than client timeout
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(100 * time.Millisecond)
+ w.WriteHeader(http.StatusOK)
+ }))
+ defer ts.Close()
+
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "max retries exceeded")
+}
+
+func TestSendRequestWithRateLimitButNoResetHeader(t *testing.T) {
+ attempts := 0
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if attempts < 2 {
+ w.WriteHeader(HTTPStatusTooManyRequests)
+ attempts++
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("OK"))
+ }))
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ MaxRetries: 3,
+ Backoff: 100 * time.Millisecond,
+ }
+
+ start := time.Now()
+ resp, err := handler.SendRequest(config)
+ duration := time.Since(start)
+
+ assert.NoError(t, err)
+ assert.Equal(t, http.StatusOK, resp.StatusCode)
+ assert.GreaterOrEqual(t, duration, 300*time.Millisecond)
+ assert.Equal(t, []byte("OK"), resp.Body)
+}
+
+func TestSendRequestWhenServerClosesConnection(t *testing.T) {
+ ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h1 := w.(http.Hijacker)
+ conn, _, _ := h1.Hijack()
+ conn.Close()
+ }))
+ ts.EnableHTTP2 = false // Disable HTTP/2 to allow hijacking
+ ts.Start()
+ defer ts.Close()
+
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: ts.URL,
+ MaxRetries: 1,
+ Backoff: 100 * time.Millisecond,
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "max retries exceeded")
+}
+
+func TestSendRequestWithInvalidPortAndMaxRetriesExceeded(t *testing.T) {
+ handler := NewRequestHandler()
+ config := RequestConfig{
+ Method: "GET",
+ URL: "http://localhost:0", // Invalid port to force error
+ MaxRetries: 2,
+ Backoff: 10 * time.Millisecond,
+ }
+
+ _, err := handler.SendRequest(config)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "max retries exceeded")
+}
+
+func TestPrepareContentWithNilContent(t *testing.T) {
+ data, err := prepareContent(nil, ContentTypeJSON)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("null"), data)
+}
+
+func TestSerializeDataWithInvalidDataType(t *testing.T) {
+ _, err := serializeData(make(chan int), FormatJSON)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "unsupported type: chan int")
+}
diff --git a/internal/civisibility/utils/net/searchcommits_api.go b/internal/civisibility/utils/net/searchcommits_api.go
new file mode 100644
index 0000000000..2aa787b77b
--- /dev/null
+++ b/internal/civisibility/utils/net/searchcommits_api.go
@@ -0,0 +1,62 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "fmt"
+)
+
+const (
+ searchCommitsType string = "commit"
+ searchCommitsURLPath string = "api/v2/git/repository/search_commits"
+)
+
+type (
+ searchCommits struct {
+ Data []searchCommitsData `json:"data"`
+ Meta searchCommitsMeta `json:"meta"`
+ }
+ searchCommitsData struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ }
+ searchCommitsMeta struct {
+ RepositoryURL string `json:"repository_url"`
+ }
+)
+
+func (c *client) GetCommits(localCommits []string) ([]string, error) {
+ body := searchCommits{
+ Data: []searchCommitsData{},
+ Meta: searchCommitsMeta{
+ RepositoryURL: c.repositoryURL,
+ },
+ }
+
+ for _, localCommit := range localCommits {
+ body.Data = append(body.Data, searchCommitsData{
+ ID: localCommit,
+ Type: searchCommitsType,
+ })
+ }
+
+ response, err := c.handler.SendRequest(*c.getPostRequestConfig(searchCommitsURLPath, body))
+ if err != nil {
+ return nil, fmt.Errorf("sending search commits request: %s", err.Error())
+ }
+
+ var responseObject searchCommits
+ err = response.Unmarshal(&responseObject)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalling search commits response: %s", err.Error())
+ }
+
+ var commits []string
+ for _, commit := range responseObject.Data {
+ commits = append(commits, commit.ID)
+ }
+ return commits, nil
+}
diff --git a/internal/civisibility/utils/net/searchcommits_api_test.go b/internal/civisibility/utils/net/searchcommits_api_test.go
new file mode 100644
index 0000000000..ec0e612fcd
--- /dev/null
+++ b/internal/civisibility/utils/net/searchcommits_api_test.go
@@ -0,0 +1,105 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSearchCommitsApiRequest(t *testing.T) {
+ var c *client
+ expectedResponse := searchCommits{
+ Data: []searchCommitsData{
+ {
+ ID: "commit3",
+ Type: searchCommitsType,
+ },
+ {
+ ID: "commit4",
+ Type: searchCommitsType,
+ },
+ },
+ }
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "failed to read body", http.StatusBadRequest)
+ return
+ }
+
+ if r.Header.Get(HeaderContentType) == ContentTypeJSON {
+ var request searchCommits
+ json.Unmarshal(body, &request)
+ assert.Equal(t, c.repositoryURL, request.Meta.RepositoryURL)
+ assert.Equal(t, "commit1", request.Data[0].ID)
+ assert.Equal(t, searchCommitsType, request.Data[0].Type)
+ assert.Equal(t, "commit2", request.Data[1].ID)
+ assert.Equal(t, searchCommitsType, request.Data[1].Type)
+
+ w.Header().Set(HeaderContentType, ContentTypeJSON)
+ json.NewEncoder(w).Encode(expectedResponse)
+ }
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ c = cInterface.(*client)
+ remoteCommits, err := cInterface.GetCommits([]string{"commit1", "commit2"})
+ assert.Nil(t, err)
+ assert.Equal(t, []string{"commit3", "commit4"}, remoteCommits)
+}
+
+func TestSearchCommitsApiRequestFailToUnmarshal(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "failed to read body", http.StatusBadRequest)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ remoteCommits, err := cInterface.GetCommits([]string{"commit1", "commit2"})
+ assert.Nil(t, remoteCommits)
+ assert.NotNil(t, err)
+ assert.Contains(t, err.Error(), "cannot unmarshal response")
+}
+
+func TestSearchCommitsApiRequestFailToGet(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "internal processing error", http.StatusInternalServerError)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ remoteCommits, err := cInterface.GetCommits([]string{"commit1", "commit2"})
+ assert.Nil(t, remoteCommits)
+ assert.NotNil(t, err)
+ assert.Contains(t, err.Error(), "sending search commits request")
+}
diff --git a/internal/civisibility/utils/net/sendpackfiles_api.go b/internal/civisibility/utils/net/sendpackfiles_api.go
new file mode 100644
index 0000000000..6a7db18dbd
--- /dev/null
+++ b/internal/civisibility/utils/net/sendpackfiles_api.go
@@ -0,0 +1,91 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+ sendPackFilesURLPath string = "api/v2/git/repository/packfile"
+)
+
+type (
+ pushedShaBody struct {
+ Data pushedShaData `json:"data"`
+ Meta pushedShaMeta `json:"meta"`
+ }
+ pushedShaData struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ }
+ pushedShaMeta struct {
+ RepositoryURL string `json:"repository_url"`
+ }
+)
+
+func (c *client) SendPackFiles(commitSha string, packFiles []string) (bytes int64, err error) {
+ if len(packFiles) == 0 {
+ return 0, nil
+ }
+
+ if commitSha == "" {
+ commitSha = c.commitSha
+ }
+
+ pushedShaFormFile := FormFile{
+ FieldName: "pushedSha",
+ Content: pushedShaBody{
+ Data: pushedShaData{
+ ID: commitSha,
+ Type: searchCommitsType,
+ },
+ Meta: pushedShaMeta{
+ RepositoryURL: c.repositoryURL,
+ },
+ },
+ ContentType: ContentTypeJSON,
+ }
+
+ for _, file := range packFiles {
+ fileContent, fileErr := os.ReadFile(file)
+ if fileErr != nil {
+ err = fmt.Errorf("failed to read pack file: %s", fileErr.Error())
+ return
+ }
+
+ request := RequestConfig{
+ Method: "POST",
+ URL: c.getURLPath(sendPackFilesURLPath),
+ Headers: c.headers,
+ Files: []FormFile{
+ pushedShaFormFile,
+ {
+ FieldName: "packfile",
+ Content: fileContent,
+ ContentType: ContentTypeOctetStream,
+ },
+ },
+ MaxRetries: DefaultMaxRetries,
+ Backoff: DefaultBackoff,
+ }
+
+ response, responseErr := c.handler.SendRequest(request)
+ if responseErr != nil {
+ err = fmt.Errorf("failed to send packfile request: %s", responseErr.Error())
+ return
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ err = fmt.Errorf("unexpected response code %d: %s", response.StatusCode, string(response.Body))
+ }
+
+ bytes += int64(len(fileContent))
+ }
+
+ return
+}
diff --git a/internal/civisibility/utils/net/sendpackfiles_api_test.go b/internal/civisibility/utils/net/sendpackfiles_api_test.go
new file mode 100644
index 0000000000..652c23b5be
--- /dev/null
+++ b/internal/civisibility/utils/net/sendpackfiles_api_test.go
@@ -0,0 +1,155 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSendPackFilesApiRequest(t *testing.T) {
+ var c *client
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ reader, err := r.MultipartReader()
+ if err != nil {
+ http.Error(w, "failed to read body", http.StatusBadRequest)
+ return
+ }
+
+ containsPushedSha := false
+ containsPackFile := false
+ for {
+ part, errPart := reader.NextPart()
+ if errPart == io.EOF {
+ break
+ }
+ partName := part.FormName()
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(part)
+ if partName == "pushedSha" {
+ assert.Equal(t, ContentTypeJSON, part.Header.Get(HeaderContentType))
+ var request pushedShaBody
+ json.Unmarshal(buf.Bytes(), &request)
+ assert.Equal(t, c.repositoryURL, request.Meta.RepositoryURL)
+ assert.Equal(t, c.commitSha, request.Data.ID)
+ assert.Equal(t, searchCommitsType, request.Data.Type)
+ containsPushedSha = true
+ } else if partName == "packfile" {
+ assert.Equal(t, ContentTypeOctetStream, part.Header.Get(HeaderContentType))
+ assert.NotZero(t, buf.Bytes())
+ containsPackFile = true
+ }
+ }
+
+ assert.True(t, containsPushedSha)
+ assert.True(t, containsPackFile)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ c = cInterface.(*client)
+ _, err := cInterface.SendPackFiles(c.commitSha, []string{
+ "sendpackfiles_api_test.go",
+ })
+ assert.Nil(t, err)
+}
+
+func TestSendPackFilesApiRequestFailToUnmarshal(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "failed to read body", http.StatusBadRequest)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ c := cInterface.(*client)
+ _, err := cInterface.SendPackFiles(c.commitSha, []string{
+ "sendpackfiles_api_test.go",
+ })
+ assert.NotNil(t, err)
+ assert.Contains(t, err.Error(), "unexpected response code")
+}
+
+func TestSendPackFilesApiRequestFailToGet(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "internal processing error", http.StatusInternalServerError)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ c := cInterface.(*client)
+ bytes, err := cInterface.SendPackFiles(c.commitSha, []string{
+ "sendpackfiles_api_test.go",
+ })
+ assert.Zero(t, bytes)
+ assert.NotNil(t, err)
+ assert.Contains(t, err.Error(), "failed to send packfile request")
+}
+
+func TestSendPackFilesApiRequestFileError(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "internal processing error", http.StatusInternalServerError)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ c := cInterface.(*client)
+ bytes, err := cInterface.SendPackFiles(c.commitSha, []string{
+ "unknown.file",
+ })
+ assert.Zero(t, bytes)
+ assert.NotNil(t, err)
+ assert.Contains(t, err.Error(), "failed to read pack file")
+}
+
+func TestSendPackFilesApiRequestNoFile(t *testing.T) {
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "internal processing error", http.StatusInternalServerError)
+ }))
+ defer server.Close()
+
+ origEnv := saveEnv()
+ path := os.Getenv("PATH")
+ defer restoreEnv(origEnv)
+
+ setCiVisibilityEnv(path, server.URL)
+
+ cInterface := NewClient()
+ bytes, err := cInterface.SendPackFiles("", nil)
+ assert.Zero(t, bytes)
+ assert.Nil(t, err)
+}
diff --git a/internal/civisibility/utils/net/settings_api.go b/internal/civisibility/utils/net/settings_api.go
new file mode 100644
index 0000000000..effc7b6fc7
--- /dev/null
+++ b/internal/civisibility/utils/net/settings_api.go
@@ -0,0 +1,92 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "fmt"
+)
+
+const (
+ settingsRequestType string = "ci_app_test_service_libraries_settings"
+ settingsURLPath string = "api/v2/libraries/tests/services/setting"
+)
+
+type (
+ settingsRequest struct {
+ Data settingsRequestHeader `json:"data"`
+ }
+
+ settingsRequestHeader struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Attributes SettingsRequestData `json:"attributes"`
+ }
+
+ SettingsRequestData struct {
+ Service string `json:"service,omitempty"`
+ Env string `json:"env,omitempty"`
+ RepositoryURL string `json:"repository_url,omitempty"`
+ Branch string `json:"branch,omitempty"`
+ Sha string `json:"sha,omitempty"`
+ Configurations testConfigurations `json:"configurations,omitempty"`
+ }
+
+ settingsResponse struct {
+ Data struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Attributes SettingsResponseData `json:"attributes"`
+ } `json:"data,omitempty"`
+ }
+
+ SettingsResponseData struct {
+ CodeCoverage bool `json:"code_coverage"`
+ EarlyFlakeDetection struct {
+ Enabled bool `json:"enabled"`
+ SlowTestRetries struct {
+ TenS int `json:"10s"`
+ ThirtyS int `json:"30s"`
+ FiveM int `json:"5m"`
+ FiveS int `json:"5s"`
+ } `json:"slow_test_retries"`
+ FaultySessionThreshold int `json:"faulty_session_threshold"`
+ } `json:"early_flake_detection"`
+ FlakyTestRetriesEnabled bool `json:"flaky_test_retries_enabled"`
+ ItrEnabled bool `json:"itr_enabled"`
+ RequireGit bool `json:"require_git"`
+ TestsSkipping bool `json:"tests_skipping"`
+ }
+)
+
+func (c *client) GetSettings() (*SettingsResponseData, error) {
+ body := settingsRequest{
+ Data: settingsRequestHeader{
+ ID: c.id,
+ Type: settingsRequestType,
+ Attributes: SettingsRequestData{
+ Service: c.serviceName,
+ Env: c.environment,
+ RepositoryURL: c.repositoryURL,
+ Branch: c.branchName,
+ Sha: c.commitSha,
+ Configurations: c.testConfigurations,
+ },
+ },
+ }
+
+ response, err := c.handler.SendRequest(*c.getPostRequestConfig(settingsURLPath, body))
+ if err != nil {
+ return nil, fmt.Errorf("sending get settings request: %s", err.Error())
+ }
+
+ var responseObject settingsResponse
+ err = response.Unmarshal(&responseObject)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalling settings response: %s", err.Error())
+ }
+
+ return &responseObject.Data.Attributes, nil
+}
diff --git a/internal/civisibility/utils/net/settings_api_test.go b/internal/civisibility/utils/net/settings_api_test.go
new file mode 100644
index 0000000000..92ce8993d9
--- /dev/null
+++ b/internal/civisibility/utils/net/settings_api_test.go
@@ -0,0 +1,111 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024 Datadog, Inc.
+
+package net
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSettingsApiRequest(t *testing.T) { // happy path: fake server validates the request and returns canned settings
+	var c *client
+	expectedResponse := settingsResponse{} // fixture the fake server will serve back
+	expectedResponse.Data.Type = settingsRequestType
+	expectedResponse.Data.Attributes.FlakyTestRetriesEnabled = true
+	expectedResponse.Data.Attributes.CodeCoverage = true
+	expectedResponse.Data.Attributes.TestsSkipping = true
+	expectedResponse.Data.Attributes.ItrEnabled = true
+	expectedResponse.Data.Attributes.RequireGit = true
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.FaultySessionThreshold = 30
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.Enabled = true
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveS = 25
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.TenS = 20
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.ThirtyS = 10
+	expectedResponse.Data.Attributes.EarlyFlakeDetection.SlowTestRetries.FiveM = 5
+
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		body, err := io.ReadAll(r.Body)
+		if err != nil {
+			http.Error(w, "failed to read body", http.StatusBadRequest)
+			return
+		}
+
+		if r.Header.Get(HeaderContentType) == ContentTypeJSON {
+			var request settingsRequest
+			assert.NoError(t, json.Unmarshal(body, &request)) // fail the test instead of silently ignoring a malformed payload
+			assert.Equal(t, c.id, request.Data.ID)
+			assert.Equal(t, settingsRequestType, request.Data.Type)
+			assert.Equal(t, settingsURLPath, r.URL.Path[1:]) // [1:] strips the leading "/"
+			assert.Equal(t, c.commitSha, request.Data.Attributes.Sha)
+			assert.Equal(t, c.branchName, request.Data.Attributes.Branch)
+			assert.Equal(t, c.environment, request.Data.Attributes.Env)
+			assert.Equal(t, c.repositoryURL, request.Data.Attributes.RepositoryURL)
+			assert.Equal(t, c.serviceName, request.Data.Attributes.Service)
+			assert.Equal(t, c.testConfigurations, request.Data.Attributes.Configurations)
+
+			w.Header().Set(HeaderContentType, ContentTypeJSON)
+			expectedResponse.Data.ID = request.Data.ID // echo the request ID back like the real backend
+			json.NewEncoder(w).Encode(expectedResponse)
+		}
+	}))
+	defer server.Close()
+
+	origEnv := saveEnv()
+	path := os.Getenv("PATH")
+	defer restoreEnv(origEnv)
+
+	setCiVisibilityEnv(path, server.URL) // point the client at the fake server
+
+	cInterface := NewClient()
+	c = cInterface.(*client) // concrete type needed so the handler can read client fields
+	settings, err := cInterface.GetSettings()
+	assert.Nil(t, err)
+	assert.Equal(t, expectedResponse.Data.Attributes, *settings)
+}
+
+func TestSettingsApiRequestFailToUnmarshal(t *testing.T) { // server replies 400 with a plain-text body, so decoding must fail
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "failed to read body", http.StatusBadRequest) // non-JSON body triggers the unmarshal failure
+	}))
+	defer server.Close()
+
+	origEnv := saveEnv()
+	path := os.Getenv("PATH")
+	defer restoreEnv(origEnv)
+
+	setCiVisibilityEnv(path, server.URL) // point the client at the fake server
+
+	cInterface := NewClient()
+	settings, err := cInterface.GetSettings()
+	assert.Nil(t, settings) // no settings on error
+	assert.NotNil(t, err)
+	assert.Contains(t, err.Error(), "cannot unmarshal response") // NOTE(review): substring presumably comes from the request-handler layer, not GetSettings' own wrapper — confirm
+}
+
+func TestSettingsApiRequestFailToGet(t *testing.T) { // server replies 500, so the request itself must be reported as failed
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "internal processing error", http.StatusInternalServerError) // 500 makes SendRequest return an error
+	}))
+	defer server.Close()
+
+	origEnv := saveEnv()
+	path := os.Getenv("PATH")
+	defer restoreEnv(origEnv)
+
+	setCiVisibilityEnv(path, server.URL) // point the client at the fake server
+
+	cInterface := NewClient()
+	settings, err := cInterface.GetSettings()
+	assert.Nil(t, settings) // no settings on error
+	assert.NotNil(t, err)
+	assert.Contains(t, err.Error(), "sending get settings request") // the wrapper prefix added by GetSettings
+}
diff --git a/internal/civisibility/utils/testdata/fixtures/github-event.json b/internal/civisibility/utils/testdata/fixtures/github-event.json
new file mode 100644
index 0000000000..b9fe79f2aa
--- /dev/null
+++ b/internal/civisibility/utils/testdata/fixtures/github-event.json
@@ -0,0 +1,490 @@
+{
+ "action": "synchronize",
+ "after": "df289512a51123083a8e6931dd6f57bb3883d4c4",
+ "before": "f659d2fdd7bedffb40d9ab223dbde6afa5eadc32",
+ "number": 1,
+ "pull_request": {
+ "_links": {
+ "comments": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/1/comments"
+ },
+ "commits": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1/commits"
+ },
+ "html": {
+ "href": "https://github.com/nikita-tkachenko-datadog/ci-test-project/pull/1"
+ },
+ "issue": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/1"
+ },
+ "review_comment": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/comments{/number}"
+ },
+ "review_comments": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1/comments"
+ },
+ "self": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1"
+ },
+ "statuses": {
+ "href": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/statuses/df289512a51123083a8e6931dd6f57bb3883d4c4"
+ }
+ },
+ "active_lock_reason": null,
+ "additions": 2,
+ "assignee": null,
+ "assignees": [],
+ "author_association": "OWNER",
+ "auto_merge": null,
+ "base": {
+ "label": "nikita-tkachenko-datadog:main",
+ "ref": "main",
+ "repo": {
+ "allow_auto_merge": false,
+ "allow_forking": true,
+ "allow_merge_commit": true,
+ "allow_rebase_merge": true,
+ "allow_squash_merge": true,
+ "allow_update_branch": false,
+ "archive_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/{archive_format}{/ref}",
+ "archived": false,
+ "assignees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/assignees{/user}",
+ "blobs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/blobs{/sha}",
+ "branches_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/branches{/branch}",
+ "clone_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "collaborators_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/collaborators{/collaborator}",
+ "comments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/comments{/number}",
+ "commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/commits{/sha}",
+ "compare_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/compare/{base}...{head}",
+ "contents_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contents/{+path}",
+ "contributors_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contributors",
+ "created_at": "2023-01-09T10:24:06Z",
+ "default_branch": "main",
+ "delete_branch_on_merge": false,
+ "deployments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/deployments",
+ "description": null,
+ "disabled": false,
+ "downloads_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/downloads",
+ "events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/events",
+ "fork": false,
+ "forks": 0,
+ "forks_count": 0,
+ "forks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/forks",
+ "full_name": "nikita-tkachenko-datadog/ci-test-project",
+ "git_commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/commits{/sha}",
+ "git_refs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/refs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/tags{/sha}",
+ "git_url": "git://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "has_discussions": false,
+ "has_downloads": true,
+ "has_issues": true,
+ "has_pages": false,
+ "has_projects": true,
+ "has_wiki": false,
+ "homepage": null,
+ "hooks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/hooks",
+ "html_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "id": 586827266,
+ "is_template": false,
+ "issue_comment_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/comments{/number}",
+ "issue_events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/events{/number}",
+ "issues_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues{/number}",
+ "keys_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/keys{/key_id}",
+ "labels_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/labels{/name}",
+ "language": "Shell",
+ "languages_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/languages",
+ "license": null,
+ "merge_commit_message": "PR_TITLE",
+ "merge_commit_title": "MERGE_MESSAGE",
+ "merges_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/merges",
+ "milestones_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/milestones{/number}",
+ "mirror_url": null,
+ "name": "ci-test-project",
+ "node_id": "R_kgDOIvpGAg",
+ "notifications_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/notifications{?since,all,participating}",
+ "open_issues": 1,
+ "open_issues_count": 1,
+ "owner": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ },
+ "private": true,
+ "pulls_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls{/number}",
+ "pushed_at": "2024-09-11T15:12:25Z",
+ "releases_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/releases{/id}",
+ "size": 90,
+ "squash_merge_commit_message": "COMMIT_MESSAGES",
+ "squash_merge_commit_title": "COMMIT_OR_PR_TITLE",
+ "ssh_url": "git@github.com:nikita-tkachenko-datadog/ci-test-project.git",
+ "stargazers_count": 0,
+ "stargazers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/stargazers",
+ "statuses_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/statuses/{sha}",
+ "subscribers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscribers",
+ "subscription_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscription",
+ "svn_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/tags",
+ "teams_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/teams",
+ "topics": [],
+ "trees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/trees{/sha}",
+ "updated_at": "2024-09-11T13:41:11Z",
+ "url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project",
+ "use_squash_pr_title_as_default": false,
+ "visibility": "private",
+ "watchers": 0,
+ "watchers_count": 0,
+ "web_commit_signoff_required": false
+ },
+ "sha": "52e0974c74d41160a03d59ddc73bb9f5adab054b",
+ "user": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ }
+ },
+ "body": "# What Does This Do\r\n\r\n# Motivation\r\n\r\n# Additional Notes\r\n",
+ "changed_files": 3,
+ "closed_at": null,
+ "comments": 0,
+ "comments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/1/comments",
+ "commits": 2,
+ "commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1/commits",
+ "created_at": "2024-09-11T15:08:02Z",
+ "deletions": 0,
+ "diff_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project/pull/1.diff",
+ "draft": false,
+ "head": {
+ "label": "nikita-tkachenko-datadog:test-branch",
+ "ref": "test-branch",
+ "repo": {
+ "allow_auto_merge": false,
+ "allow_forking": true,
+ "allow_merge_commit": true,
+ "allow_rebase_merge": true,
+ "allow_squash_merge": true,
+ "allow_update_branch": false,
+ "archive_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/{archive_format}{/ref}",
+ "archived": false,
+ "assignees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/assignees{/user}",
+ "blobs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/blobs{/sha}",
+ "branches_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/branches{/branch}",
+ "clone_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "collaborators_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/collaborators{/collaborator}",
+ "comments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/comments{/number}",
+ "commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/commits{/sha}",
+ "compare_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/compare/{base}...{head}",
+ "contents_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contents/{+path}",
+ "contributors_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contributors",
+ "created_at": "2023-01-09T10:24:06Z",
+ "default_branch": "main",
+ "delete_branch_on_merge": false,
+ "deployments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/deployments",
+ "description": null,
+ "disabled": false,
+ "downloads_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/downloads",
+ "events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/events",
+ "fork": false,
+ "forks": 0,
+ "forks_count": 0,
+ "forks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/forks",
+ "full_name": "nikita-tkachenko-datadog/ci-test-project",
+ "git_commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/commits{/sha}",
+ "git_refs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/refs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/tags{/sha}",
+ "git_url": "git://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "has_discussions": false,
+ "has_downloads": true,
+ "has_issues": true,
+ "has_pages": false,
+ "has_projects": true,
+ "has_wiki": false,
+ "homepage": null,
+ "hooks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/hooks",
+ "html_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "id": 586827266,
+ "is_template": false,
+ "issue_comment_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/comments{/number}",
+ "issue_events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/events{/number}",
+ "issues_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues{/number}",
+ "keys_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/keys{/key_id}",
+ "labels_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/labels{/name}",
+ "language": "Shell",
+ "languages_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/languages",
+ "license": null,
+ "merge_commit_message": "PR_TITLE",
+ "merge_commit_title": "MERGE_MESSAGE",
+ "merges_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/merges",
+ "milestones_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/milestones{/number}",
+ "mirror_url": null,
+ "name": "ci-test-project",
+ "node_id": "R_kgDOIvpGAg",
+ "notifications_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/notifications{?since,all,participating}",
+ "open_issues": 1,
+ "open_issues_count": 1,
+ "owner": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ },
+ "private": true,
+ "pulls_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls{/number}",
+ "pushed_at": "2024-09-11T15:12:25Z",
+ "releases_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/releases{/id}",
+ "size": 90,
+ "squash_merge_commit_message": "COMMIT_MESSAGES",
+ "squash_merge_commit_title": "COMMIT_OR_PR_TITLE",
+ "ssh_url": "git@github.com:nikita-tkachenko-datadog/ci-test-project.git",
+ "stargazers_count": 0,
+ "stargazers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/stargazers",
+ "statuses_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/statuses/{sha}",
+ "subscribers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscribers",
+ "subscription_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscription",
+ "svn_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/tags",
+ "teams_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/teams",
+ "topics": [],
+ "trees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/trees{/sha}",
+ "updated_at": "2024-09-11T13:41:11Z",
+ "url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project",
+ "use_squash_pr_title_as_default": false,
+ "visibility": "private",
+ "watchers": 0,
+ "watchers_count": 0,
+ "web_commit_signoff_required": false
+ },
+ "sha": "df289512a51123083a8e6931dd6f57bb3883d4c4",
+ "user": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ }
+ },
+ "html_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project/pull/1",
+ "id": 2066570986,
+ "issue_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/1",
+ "labels": [],
+ "locked": false,
+ "maintainer_can_modify": false,
+ "merge_commit_sha": "d9a3212d0d5d1483426dbbdf0beea32ee50abcde",
+ "mergeable": null,
+ "mergeable_state": "unknown",
+ "merged": false,
+ "merged_at": null,
+ "merged_by": null,
+ "milestone": null,
+ "node_id": "PR_kwDOIvpGAs57LV7q",
+ "number": 1,
+ "patch_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project/pull/1.patch",
+ "rebaseable": null,
+ "requested_reviewers": [],
+ "requested_teams": [],
+ "review_comment_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/comments{/number}",
+ "review_comments": 0,
+ "review_comments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1/comments",
+ "state": "open",
+ "statuses_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/statuses/df289512a51123083a8e6931dd6f57bb3883d4c4",
+ "title": "Test commit",
+ "updated_at": "2024-09-11T15:12:26Z",
+ "url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls/1",
+ "user": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ }
+ },
+ "repository": {
+ "allow_forking": true,
+ "archive_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/{archive_format}{/ref}",
+ "archived": false,
+ "assignees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/assignees{/user}",
+ "blobs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/blobs{/sha}",
+ "branches_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/branches{/branch}",
+ "clone_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "collaborators_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/collaborators{/collaborator}",
+ "comments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/comments{/number}",
+ "commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/commits{/sha}",
+ "compare_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/compare/{base}...{head}",
+ "contents_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contents/{+path}",
+ "contributors_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/contributors",
+ "created_at": "2023-01-09T10:24:06Z",
+ "default_branch": "main",
+ "deployments_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/deployments",
+ "description": null,
+ "disabled": false,
+ "downloads_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/downloads",
+ "events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/events",
+ "fork": false,
+ "forks": 0,
+ "forks_count": 0,
+ "forks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/forks",
+ "full_name": "nikita-tkachenko-datadog/ci-test-project",
+ "git_commits_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/commits{/sha}",
+ "git_refs_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/refs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/tags{/sha}",
+ "git_url": "git://github.com/nikita-tkachenko-datadog/ci-test-project.git",
+ "has_discussions": false,
+ "has_downloads": true,
+ "has_issues": true,
+ "has_pages": false,
+ "has_projects": true,
+ "has_wiki": false,
+ "homepage": null,
+ "hooks_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/hooks",
+ "html_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "id": 586827266,
+ "is_template": false,
+ "issue_comment_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/comments{/number}",
+ "issue_events_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues/events{/number}",
+ "issues_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/issues{/number}",
+ "keys_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/keys{/key_id}",
+ "labels_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/labels{/name}",
+ "language": "Shell",
+ "languages_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/languages",
+ "license": null,
+ "merges_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/merges",
+ "milestones_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/milestones{/number}",
+ "mirror_url": null,
+ "name": "ci-test-project",
+ "node_id": "R_kgDOIvpGAg",
+ "notifications_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/notifications{?since,all,participating}",
+ "open_issues": 1,
+ "open_issues_count": 1,
+ "owner": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ },
+ "private": true,
+ "pulls_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/pulls{/number}",
+ "pushed_at": "2024-09-11T15:12:25Z",
+ "releases_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/releases{/id}",
+ "size": 90,
+ "ssh_url": "git@github.com:nikita-tkachenko-datadog/ci-test-project.git",
+ "stargazers_count": 0,
+ "stargazers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/stargazers",
+ "statuses_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/statuses/{sha}",
+ "subscribers_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscribers",
+ "subscription_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/subscription",
+ "svn_url": "https://github.com/nikita-tkachenko-datadog/ci-test-project",
+ "tags_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/tags",
+ "teams_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/teams",
+ "topics": [],
+ "trees_url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project/git/trees{/sha}",
+ "updated_at": "2024-09-11T13:41:11Z",
+ "url": "https://api.github.com/repos/nikita-tkachenko-datadog/ci-test-project",
+ "visibility": "private",
+ "watchers": 0,
+ "watchers_count": 0,
+ "web_commit_signoff_required": false
+ },
+ "sender": {
+ "avatar_url": "https://avatars.githubusercontent.com/u/121111529?v=4",
+ "events_url": "https://api.github.com/users/nikita-tkachenko-datadog/events{/privacy}",
+ "followers_url": "https://api.github.com/users/nikita-tkachenko-datadog/followers",
+ "following_url": "https://api.github.com/users/nikita-tkachenko-datadog/following{/other_user}",
+ "gists_url": "https://api.github.com/users/nikita-tkachenko-datadog/gists{/gist_id}",
+ "gravatar_id": "",
+ "html_url": "https://github.com/nikita-tkachenko-datadog",
+ "id": 121111529,
+ "login": "nikita-tkachenko-datadog",
+ "node_id": "U_kgDOBzgD6Q",
+ "organizations_url": "https://api.github.com/users/nikita-tkachenko-datadog/orgs",
+ "received_events_url": "https://api.github.com/users/nikita-tkachenko-datadog/received_events",
+ "repos_url": "https://api.github.com/users/nikita-tkachenko-datadog/repos",
+ "site_admin": false,
+ "starred_url": "https://api.github.com/users/nikita-tkachenko-datadog/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/nikita-tkachenko-datadog/subscriptions",
+ "type": "User",
+ "url": "https://api.github.com/users/nikita-tkachenko-datadog"
+ }
+}
diff --git a/internal/datastreams/processor.go b/internal/datastreams/processor.go
index 654c72dbb3..10a8b8afb3 100644
--- a/internal/datastreams/processor.go
+++ b/internal/datastreams/processor.go
@@ -340,7 +340,11 @@ func (p *Processor) Start() {
}
p.stop = make(chan struct{})
p.flushRequest = make(chan chan<- struct{})
- go p.reportStats()
+ p.wg.Add(1)
+ go func() {
+ defer p.wg.Done()
+ p.reportStats()
+ }()
p.wg.Add(1)
go func() {
defer p.wg.Done()
@@ -372,7 +376,14 @@ func (p *Processor) Stop() {
}
func (p *Processor) reportStats() {
- for range time.NewTicker(time.Second * 10).C {
+ tick := time.NewTicker(time.Second * 10)
+ defer tick.Stop()
+ for {
+ select {
+ case <-p.stop:
+ return
+ case <-tick.C:
+ }
p.statsd.Count("datadog.datastreams.processor.payloads_in", atomic.SwapInt64(&p.stats.payloadsIn, 0), nil, 1)
p.statsd.Count("datadog.datastreams.processor.flushed_payloads", atomic.SwapInt64(&p.stats.flushedPayloads, 0), nil, 1)
p.statsd.Count("datadog.datastreams.processor.flushed_buckets", atomic.SwapInt64(&p.stats.flushedBuckets, 0), nil, 1)
diff --git a/internal/exectracetest/go.mod b/internal/exectracetest/go.mod
index 2cf127f51a..8498e0f65b 100644
--- a/internal/exectracetest/go.mod
+++ b/internal/exectracetest/go.mod
@@ -1,8 +1,6 @@
module gopkg.in/DataDog/dd-trace-go.v1/internal/exectracetest
-go 1.21
-
-toolchain go1.21.0
+go 1.22.0
require (
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b
@@ -12,30 +10,36 @@ require (
)
require (
- github.com/DataDog/appsec-internal-go v1.7.0 // indirect
+ github.com/DataDog/appsec-internal-go v1.8.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 // indirect
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
- github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect
+ github.com/DataDog/go-libddwaf/v3 v3.4.0 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5 // indirect
- github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+ github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
github.com/google/uuid v1.5.0 // indirect
+ github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
+ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
+ github.com/hashicorp/go-sockaddr v1.0.2 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
- github.com/philhofer/fwd v1.1.2 // indirect
+ github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pkg/errors v0.9.1 // indirect
+ github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
- github.com/tinylib/msgp v1.1.8 // indirect
+ github.com/tinylib/msgp v1.2.1 // indirect
go.uber.org/atomic v1.11.0 // indirect
- golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/mod v0.18.0 // indirect
golang.org/x/sync v0.7.0 // indirect
- golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/sys v0.23.0 // indirect
golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.21.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/protobuf v1.33.0 // indirect
)
diff --git a/internal/exectracetest/go.sum b/internal/exectracetest/go.sum
index 0473e4d34a..22ae3d2db0 100644
--- a/internal/exectracetest/go.sum
+++ b/internal/exectracetest/go.sum
@@ -1,17 +1,17 @@
-github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0=
-github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
+github.com/DataDog/appsec-internal-go v1.8.0 h1:1Tfn3LEogntRqZtf88twSApOCAAO3V+NILYhuQIo4J4=
+github.com/DataDog/appsec-internal-go v1.8.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 h1:LplNAmMgZvGU7kKA0+4c1xWOjz828xweW5TCi8Mw9Q0=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0/go.mod h1:4Vo3SJ24uzfKHUHLoFa8t8o+LH+7TCQ7sPcZDtOpSP4=
github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
-github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4=
-github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec=
+github.com/DataDog/go-libddwaf/v3 v3.4.0 h1:NJ2W2vhYaOm1OWr1LJCbdgp7ezG/XLJcQKBmjFwhSuM=
+github.com/DataDog/go-libddwaf/v3 v3.4.0/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5 h1:2S3vDq1CtlmVMdq0+7TIwYKUSDJmBEsaB9gdnGI52yE=
github.com/DataDog/go-runtime-metrics-internal v0.0.0-20240819080326-9964da68e4b5/go.mod h1:quaQJ+wPN41xEC458FCpTwyROZm3MzmTZ8q8XOXQiPs=
-github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
-github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
+github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
@@ -19,6 +19,8 @@ github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -33,13 +35,11 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
-github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
-github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
@@ -53,39 +53,51 @@ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBB
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
-github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
-github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
+github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
@@ -106,10 +118,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
-github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
+github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU=
+github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -117,28 +128,21 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -146,34 +150,26 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg=
+google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -182,11 +178,23 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
+lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
+modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
+modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
+modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
+modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
+modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/internal/globalconfig/globalconfig.go b/internal/globalconfig/globalconfig.go
index 35626580aa..a36f50035f 100644
--- a/internal/globalconfig/globalconfig.go
+++ b/internal/globalconfig/globalconfig.go
@@ -80,7 +80,10 @@ func SetDogstatsdAddr(addr string) {
func StatsTags() []string {
cfg.mu.RLock()
defer cfg.mu.RUnlock()
- return cfg.statsTags
+ // Copy the slice before returning it, so that callers cannot pollute the underlying array
+ tags := make([]string, len(cfg.statsTags))
+ copy(tags, cfg.statsTags)
+ return tags
}
// SetStatsTags configures the list of tags that should be applied to contribs' statsd.Client as global tags
@@ -88,7 +91,10 @@ func StatsTags() []string {
func SetStatsTags(tags []string) {
cfg.mu.Lock()
defer cfg.mu.Unlock()
- cfg.statsTags = tags
+ // Copy the slice before setting it, so that any changes to the slice provided to SetStatsTags do not pollute the underlying array of statsTags
+ statsTags := make([]string, len(tags))
+ copy(statsTags, tags)
+ cfg.statsTags = statsTags
}
// RuntimeID returns this process's unique runtime id.
diff --git a/internal/globalconfig/globalconfig_test.go b/internal/globalconfig/globalconfig_test.go
index 66370d415d..cecf9c6dd1 100644
--- a/internal/globalconfig/globalconfig_test.go
+++ b/internal/globalconfig/globalconfig_test.go
@@ -18,3 +18,17 @@ func TestHeaderTag(t *testing.T) {
assert.Equal(t, "tag1", cfg.headersAsTags.Get("header1"))
assert.Equal(t, "tag2", cfg.headersAsTags.Get("header2"))
}
+
+// Assert that APIs to access cfg.statsTags protect against pollution from external changes
+func TestStatsTags(t *testing.T) {
+ array := [6]string{"aaa", "bbb", "ccc"}
+ slice1 := array[:]
+ SetStatsTags(slice1)
+ slice1 = append(slice1, []string{"ddd", "eee", "fff"}...)
+ slice1[0] = "zzz"
+ assert.Equal(t, cfg.statsTags[:3], []string{"aaa", "bbb", "ccc"})
+
+ tags := StatsTags()
+ tags[1] = "yyy"
+ assert.Equal(t, cfg.statsTags[1], "bbb")
+}
diff --git a/internal/log/log.go b/internal/log/log.go
index 383975147f..dc4549323c 100644
--- a/internal/log/log.go
+++ b/internal/log/log.go
@@ -39,6 +39,40 @@ type Logger interface {
Log(msg string)
}
+// File name for writing tracer logs, if DD_TRACE_LOG_DIRECTORY has been configured
+const LoggerFile = "ddtrace.log"
+
+// ManagedFile functions like a *os.File but is safe for concurrent use
+type ManagedFile struct {
+ mu sync.RWMutex
+ file *os.File
+ closed bool
+}
+
+// Close closes the ManagedFile's *os.File in a concurrent-safe manner, ensuring the file is closed only once
+func (m *ManagedFile) Close() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.file == nil || m.closed {
+ return nil
+ }
+ err := m.file.Close()
+ if err != nil {
+ return err
+ }
+ m.closed = true
+ return nil
+}
+
+func (m *ManagedFile) Name() string {
+ m.mu.RLock()
+ defer m.mu.RUnlock()
+ if m.file == nil {
+ return ""
+ }
+ return m.file.Name()
+}
+
var (
mu sync.RWMutex // guards below fields
level = LevelWarn
@@ -58,6 +92,25 @@ func UseLogger(l Logger) (undo func()) {
}
}
+// OpenFileAtPath creates a new file at the specified dirPath and configures the logger to write to this file. The dirPath must already exist on the underlying os.
+// It returns the file that was created, or nil and an error if the file creation was unsuccessful.
+// The caller of OpenFileAtPath is responsible for calling Close() on the ManagedFile
+func OpenFileAtPath(dirPath string) (*ManagedFile, error) {
+ path, err := os.Stat(dirPath)
+ if err != nil || !path.IsDir() {
+ return nil, fmt.Errorf("file path %v invalid or does not exist on the underlying os; using default logger to stderr", dirPath)
+ }
+ filepath := dirPath + "/" + LoggerFile
+ f, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ return nil, fmt.Errorf("using default logger to stderr due to error creating or opening log file: %v", err)
+ }
+ UseLogger(&defaultLogger{l: log.New(f, "", log.LstdFlags)})
+ return &ManagedFile{
+ file: f,
+ }, nil
+}
+
// SetLevel sets the given lvl for logging.
func SetLevel(lvl Level) {
mu.Lock()
diff --git a/internal/log/log_test.go b/internal/log/log_test.go
index 3840d5151b..1cfe67b0b0 100644
--- a/internal/log/log_test.go
+++ b/internal/log/log_test.go
@@ -6,7 +6,9 @@
package log
import (
+ "bytes"
"fmt"
+ "os"
"strings"
"sync"
"testing"
@@ -42,6 +44,76 @@ func (tp *testLogger) Reset() {
tp.mu.Unlock()
}
+func TestLogDirectory(t *testing.T) {
+ t.Run("invalid", func(t *testing.T) {
+ f, err := OpenFileAtPath("/some/nonexistent/path")
+ assert.Nil(t, f)
+ assert.Error(t, err)
+ })
+ t.Run("valid", func(t *testing.T) {
+ // ensure File is created successfully
+ dir, err := os.MkdirTemp("", "example")
+ if err != nil {
+ t.Fatalf("Failure creating directory %v", err)
+ }
+ f, err := OpenFileAtPath(dir)
+ assert.Nil(t, err)
+ fp := dir + "/" + LoggerFile
+ assert.NotNil(t, f.file)
+ assert.Equal(t, fp, f.file.Name())
+ assert.False(t, f.closed)
+
+ // ensure this setting plays nicely with other log features
+ oldLvl := level
+ SetLevel(LevelDebug)
+ defer func() {
+ SetLevel(oldLvl)
+ }()
+ Info("info!")
+ Warn("warn!")
+ Debug("debug!")
+ // shorten errrate to test Error() behavior in a reasonable amount of time
+ oldRate := errrate
+ errrate = time.Microsecond
+ defer func() {
+ errrate = oldRate
+ }()
+ Error("error!")
+ time.Sleep(1 * time.Second)
+
+ b, err := os.ReadFile(fp)
+ if err != nil {
+ t.Fatalf("Failure reading file: %v", err)
+ }
+ // convert file content to []string{}, split by \n, to easily check its contents
+ lines := bytes.Split(b, []byte{'\n'})
+ var logs []string
+ for _, line := range lines {
+ logs = append(logs, string(line))
+ }
+
+ assert.True(t, containsMessage("INFO", "info!", logs))
+ assert.True(t, containsMessage("WARN", "warn!", logs))
+ assert.True(t, containsMessage("DEBUG", "debug!", logs))
+ assert.True(t, containsMessage("ERROR", "error!", logs))
+
+ f.Close()
+ assert.True(t, f.closed)
+
+ //ensure f.Close() is concurrent-safe and free of deadlocks
+ var wg sync.WaitGroup
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ f.Close()
+ }()
+ }
+ wg.Wait()
+ assert.True(t, f.closed)
+ })
+}
+
func TestLog(t *testing.T) {
defer func(old Logger) { UseLogger(old) }(logger)
tp := &testLogger{}
@@ -195,3 +267,12 @@ func hasMsg(lvl, m string, lines []string) bool {
func msg(lvl, msg string) string {
return fmt.Sprintf("%s %s: %s", prefixMsg, lvl, msg)
}
+
+func containsMessage(lvl, m string, lines []string) bool {
+ for _, line := range lines {
+ if strings.Contains(line, msg(lvl, m)) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/internal/remoteconfig/remoteconfig.go b/internal/remoteconfig/remoteconfig.go
index 86b8f69231..4076c2ad34 100644
--- a/internal/remoteconfig/remoteconfig.go
+++ b/internal/remoteconfig/remoteconfig.go
@@ -70,16 +70,48 @@ const (
APMTracingHTTPHeaderTags
// APMTracingCustomTags enables APM client to set custom tags on all spans
APMTracingCustomTags
- // ASMRASPSSRF enables ASM support for runtime protection against SSRF attacks
- ASMRASPSSRF = 23
-)
-
-// Additional capability bit index values that are non-consecutive from above.
-const (
+ // ASMProcessorOverrides adds support for processor overrides through the ASM RC Product
+ ASMProcessorOverrides
+ // ASMCustomDataScanners adds support for custom data scanners through the ASM RC Product
+ ASMCustomDataScanners
+ // ASMExclusionData adds support for configurable exclusion filter data from the ASM_DATA Product
+ ASMExclusionData
// APMTracingEnabled enables APM tracing
- APMTracingEnabled Capability = 19
+ APMTracingEnabled
+ // APMTracingDataStreamsEnabled enables Data Streams Monitoring
+ APMTracingDataStreamsEnabled
+ // ASMRASPSQLI enables ASM support for runtime protection against SQL Injection attacks
+ ASMRASPSQLI
+ // ASMRASPLFI enables ASM support for runtime protection against Local File Inclusion attacks
+ ASMRASPLFI
+ // ASMRASPSSRF enables ASM support for runtime protection against SSRF attacks
+ ASMRASPSSRF
+ // ASMRASPSHI enables ASM support for runtime protection against Shell Injection attacks
+ ASMRASPSHI
+ // ASMRASPXXE enables ASM support for runtime protection against XXE attacks
+ ASMRASPXXE
+ // ASMRASPRCE enables ASM support for runtime protection against Remote Code Execution
+ ASMRASPRCE
+ // ASMRASPNOSQLI enables ASM support for runtime protection against NoSQL Injection attacks
+ ASMRASPNOSQLI
+ // ASMRASPXSS enables ASM support for runtime protection against Cross Site Scripting attacks
+ ASMRASPXSS
// APMTracingSampleRules represents the sampling rate using matching rules from APM client libraries
- APMTracingSampleRules = 29
+ APMTracingSampleRules
+ // CSMActivation represents the capability to activate CSM through remote configuration
+ CSMActivation
+ // ASMAutoUserInstrumMode represents the capability to enable the automatic user instrumentation mode
+ ASMAutoUserInstrumMode
+ // ASMEndpointFingerprinting represents the capability to enable endpoint fingerprinting
+ ASMEndpointFingerprinting
+ // ASMSessionFingerprinting represents the capability to enable session fingerprinting
+ ASMSessionFingerprinting
+ // ASMNetworkFingerprinting represents the capability to enable network fingerprinting
+ ASMNetworkFingerprinting
+ // ASMHeaderFingerprinting represents the capability to enable header fingerprinting
+ ASMHeaderFingerprinting
+ // ASMTruncationRules is the support for truncation payload rules
+ ASMTruncationRules
)
// ErrClientNotStarted is returned when the remote config client is not started.
diff --git a/internal/apps/setup-smoke-test/Dockerfile b/internal/setup-smoke-test/Dockerfile
similarity index 88%
rename from internal/apps/setup-smoke-test/Dockerfile
rename to internal/setup-smoke-test/Dockerfile
index 7828ed071a..4a113a03d9 100644
--- a/internal/apps/setup-smoke-test/Dockerfile
+++ b/internal/setup-smoke-test/Dockerfile
@@ -17,7 +17,7 @@
# select one by default, but also allows to provide a --build-arg option
# too instead of relying on the --target option. This way, the CI matrix
# can systematically use --build-arg for all of the parameters.
-ARG go="1.21" # golang docker image parameter in `golang:{go}-{buildenv}`
+ARG go="1.22" # golang docker image parameter in `golang:{go}-{buildenv}`
ARG build_env="bookworm" # golang docker image parameter in `golang:{go}-{buildenv}`
ARG build_with_cgo="0" # 0 or 1
ARG build_with_vendoring="" # y or empty
@@ -30,7 +30,7 @@ FROM golang:$go-$build_env AS build-env
WORKDIR /src
COPY . .
-WORKDIR /src/internal/apps/setup-smoke-test
+WORKDIR /src/internal/setup-smoke-test
ARG build_with_cgo
RUN go env -w CGO_ENABLED=$build_with_cgo
@@ -67,7 +67,7 @@ RUN ldd smoke-test || true
# this image to preperly highlight the fact that the compiled program is running
# out of the box in it without any further installation.
FROM debian:11 AS debian11
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# debian12 deployment environment
@@ -75,7 +75,7 @@ CMD /usr/local/bin/smoke-test
# this image to preperly highlight the fact that the compiled program is running
# out of the box in it without any further installation.
FROM debian:12 AS debian12
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# alpine deployment environment
@@ -87,7 +87,7 @@ ARG build_with_cgo
RUN set -ex; if [ "$build_with_cgo" = "1" ]; then \
apk update && apk add libc6-compat; \
fi
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# amazonlinux:2 deployment environment
@@ -95,7 +95,7 @@ CMD /usr/local/bin/smoke-test
# this image to preperly highlight the fact that the compiled program is running
# out of the box in it without any further installation.
FROM amazonlinux:2 AS al2
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# amazonlinux:2023 deployment environment
@@ -103,7 +103,7 @@ CMD /usr/local/bin/smoke-test
# this image to preperly highlight the fact that the compiled program is running
# out of the box in it without any further installation.
FROM amazonlinux:2023 AS al2023
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# busybox deployment environment
@@ -112,7 +112,7 @@ CMD /usr/local/bin/smoke-test
# out of the box in it without any further installation.
FROM busybox AS busybox
RUN mkdir -p /usr/local/bin
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /usr/local/bin
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /usr/local/bin
CMD /usr/local/bin/smoke-test
# scratch deployment environment - meant to be used with CGO_ENABLED=0
@@ -120,7 +120,7 @@ CMD /usr/local/bin/smoke-test
# this image to preperly highlight the fact that the compiled program is running
# out of the box in it without any further installation.
FROM scratch AS scratch
-COPY --from=build-env /src/internal/apps/setup-smoke-test/smoke-test /
+COPY --from=build-env /src/internal/setup-smoke-test/smoke-test /
ENTRYPOINT [ "/smoke-test" ]
# Final deployment environment - helper target to end up a single one
diff --git a/internal/setup-smoke-test/go.mod b/internal/setup-smoke-test/go.mod
new file mode 100644
index 0000000000..2da1c47234
--- /dev/null
+++ b/internal/setup-smoke-test/go.mod
@@ -0,0 +1,42 @@
+module github.com/DataDog/dd-trace-go/internal/setup-smoke-test
+
+go 1.22.0
+
+require gopkg.in/DataDog/dd-trace-go.v1 v1.67.1
+
+require (
+ github.com/DataDog/appsec-internal-go v1.7.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
+ github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
+ github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect
+ github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
+ github.com/DataDog/gostackparse v0.7.0 // indirect
+ github.com/DataDog/sketches-go v1.4.5 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
+ github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
+ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
+ github.com/google/uuid v1.5.0 // indirect
+ github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
+ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
+ github.com/hashicorp/go-sockaddr v1.0.2 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/outcaste-io/ristretto v0.2.3 // indirect
+ github.com/philhofer/fwd v1.1.2 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect
+ github.com/ryanuber/go-glob v1.0.0 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/tinylib/msgp v1.1.8 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+ golang.org/x/tools v0.16.1 // indirect
+ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+)
diff --git a/internal/setup-smoke-test/go.sum b/internal/setup-smoke-test/go.sum
new file mode 100644
index 0000000000..b19c19b177
--- /dev/null
+++ b/internal/setup-smoke-test/go.sum
@@ -0,0 +1,203 @@
+github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0=
+github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
+github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
+github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
+github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4=
+github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec=
+github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
+github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
+github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
+github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
+github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
+github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
+github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
+github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
+github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
+github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo=
+github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
+github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
+github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
+github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
+github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
+github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
+github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg=
+google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/DataDog/dd-trace-go.v1 v1.67.1 h1:frgcpZ18wmpj+/TwyDJM8057M65aOdgaxLiZ8pb1PFU=
+gopkg.in/DataDog/dd-trace-go.v1 v1.67.1/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
+modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
+modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
+modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
+modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
+modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
+modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
diff --git a/internal/apps/setup-smoke-test/main.go b/internal/setup-smoke-test/main.go
similarity index 100%
rename from internal/apps/setup-smoke-test/main.go
rename to internal/setup-smoke-test/main.go
diff --git a/internal/stacktrace/event.go b/internal/stacktrace/event.go
index ac292f13f6..0b6ff81882 100644
--- a/internal/stacktrace/event.go
+++ b/internal/stacktrace/event.go
@@ -27,6 +27,8 @@ const (
ExploitEvent EventCategory = "exploit"
)
+const SpanKey = "_dd.stack"
+
// Event is the toplevel structure to contain a stacktrace and the additional information needed to correlate it with other data
type Event struct {
// Category is a well-known type of the event, not optional
@@ -82,30 +84,32 @@ func WithID(id string) Options {
}
}
-// AddToSpan adds the event to the given span's root span as a tag if stacktrace collection is enabled
-func AddToSpan(span ddtrace.Span, events ...*Event) {
+// GetSpanValue returns the value to be set as a tag on a span for the given stacktrace events
+func GetSpanValue(events ...*Event) any {
if !Enabled() {
- return
+ return nil
}
- // TODO(eliott.bouhana): switch to a map[EventCategory][]*Event type when the tinylib/msgp@1.1.10 is out
- groupByCategory := make(map[string]any, 3)
-
+ groupByCategory := make(map[string][]*Event, 3)
for _, event := range events {
if _, ok := groupByCategory[string(event.Category)]; !ok {
groupByCategory[string(event.Category)] = []*Event{event}
continue
}
-
- groupByCategory[string(event.Category)] = append(groupByCategory[string(event.Category)].([]*Event), event)
+ groupByCategory[string(event.Category)] = append(groupByCategory[string(event.Category)], event)
}
+ return internal.MetaStructValue{Value: groupByCategory}
+}
+
+// AddToSpan adds the event to the given span's root span as a tag if stacktrace collection is enabled
+func AddToSpan(span ddtrace.Span, events ...*Event) {
+ value := GetSpanValue(events...)
type rooter interface {
Root() ddtrace.Span
}
if lrs, ok := span.(rooter); ok {
span = lrs.Root()
}
-
- span.SetTag("_dd.stack", internal.MetaStructValue{Value: groupByCategory})
+ span.SetTag(SpanKey, value)
}
diff --git a/internal/stacktrace/event_test.go b/internal/stacktrace/event_test.go
index a95db53633..0de38a04f9 100644
--- a/internal/stacktrace/event_test.go
+++ b/internal/stacktrace/event_test.go
@@ -8,8 +8,6 @@ package stacktrace
import (
"testing"
- "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
- ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/internal"
"github.com/stretchr/testify/require"
@@ -27,40 +25,29 @@ func TestNewEvent(t *testing.T) {
}
func TestEventToSpan(t *testing.T) {
- mt := mocktracer.Start()
- defer mt.Stop()
+ event1 := NewEvent(ExceptionEvent, WithMessage("message1"))
+ event2 := NewEvent(ExploitEvent, WithMessage("message2"))
+ spanValue := GetSpanValue(event1, event2)
- span := ddtracer.StartSpan("op")
- event := NewEvent(ExceptionEvent, WithMessage("message"))
- AddToSpan(span, event)
- span.Finish()
+ eventsMap := spanValue.(internal.MetaStructValue).Value.(map[string][]*Event)
+ require.Len(t, eventsMap, 2)
- spans := mt.FinishedSpans()
- require.Len(t, spans, 1)
- require.Equal(t, "op", spans[0].OperationName())
+ eventsCat := eventsMap[string(ExceptionEvent)]
+ require.Len(t, eventsCat, 1)
- eventsMap := spans[0].Tag("_dd.stack").(internal.MetaStructValue).Value.(map[string]any)
- require.Len(t, eventsMap, 1)
+ require.Equal(t, *event1, *eventsCat[0])
- eventsCat := eventsMap[string(ExceptionEvent)].([]*Event)
+ eventsCat = eventsMap[string(ExploitEvent)]
require.Len(t, eventsCat, 1)
- require.Equal(t, *event, *eventsCat[0])
+ require.Equal(t, *event2, *eventsCat[0])
}
func TestMsgPackSerialization(t *testing.T) {
- mt := mocktracer.Start()
- defer mt.Stop()
-
- span := ddtracer.StartSpan("op")
event := NewEvent(ExceptionEvent, WithMessage("message"), WithType("type"), WithID("id"))
- AddToSpan(span, event)
- span.Finish()
-
- spans := mt.FinishedSpans()
- require.Len(t, spans, 1)
+ spanValue := GetSpanValue(event)
- eventsMap := spans[0].Tag("_dd.stack").(internal.MetaStructValue).Value
+ eventsMap := spanValue.(internal.MetaStructValue).Value
_, err := msgp.AppendIntf(nil, eventsMap)
require.NoError(t, err)
diff --git a/internal/stacktrace/stacktrace.go b/internal/stacktrace/stacktrace.go
index 9b9ec8c525..060aebe12e 100644
--- a/internal/stacktrace/stacktrace.go
+++ b/internal/stacktrace/stacktrace.go
@@ -33,6 +33,7 @@ var (
"github.com/DataDog/datadog-agent",
"github.com/DataDog/appsec-internal-go",
"github.com/datadog/orchestrion",
+ "github.com/DataDog/orchestrion",
}
)
diff --git a/internal/telemetry/telemetry_test.go b/internal/telemetry/telemetry_test.go
index cad9cd43ea..87d40ff02a 100644
--- a/internal/telemetry/telemetry_test.go
+++ b/internal/telemetry/telemetry_test.go
@@ -112,6 +112,7 @@ func TestProductChange(t *testing.T) {
GlobalClient.ProductChange(NamespaceProfilers, true, []Configuration{{Name: "key", Value: "value"}})
},
},
+ /* This case is flaky (see #2688)
{
name: "profiler start, tracer start",
wantedMessages: []RequestType{RequestTypeAppStarted, RequestTypeDependenciesLoaded, RequestTypeAppClientConfigurationChange},
@@ -120,6 +121,7 @@ func TestProductChange(t *testing.T) {
GlobalClient.ProductChange(NamespaceTracers, true, []Configuration{{Name: "key", Value: "value"}})
},
},
+ */
}
for _, test := range tests {
diff --git a/internal/version/version.go b/internal/version/version.go
index 93f6ee1720..b7b24999c0 100644
--- a/internal/version/version.go
+++ b/internal/version/version.go
@@ -13,7 +13,7 @@ import (
// Tag specifies the current release tag. It needs to be manually
// updated. A test checks that the value of Tag never points to a
// git tag that is older than HEAD.
-const Tag = "v1.68.0-dev"
+const Tag = "v1.70.0-dev"
// Dissected version number. Filled during init()
var (
diff --git a/profiler/metrics.go b/profiler/metrics.go
index 2c60bff410..ec68c38592 100644
--- a/profiler/metrics.go
+++ b/profiler/metrics.go
@@ -61,12 +61,18 @@ func (m *metrics) reset(now time.Time) {
func (m *metrics) report(now time.Time, buf *bytes.Buffer) error {
period := now.Sub(m.collectedAt)
-
- if period < time.Second {
- // Profiler could be mis-configured to report more frequently than every second
- // or a system clock issue causes time to run backwards.
- // We can't emit valid metrics in either case.
- return collectionTooFrequent{min: time.Second, observed: period}
+ if period <= 0 {
+ // It is technically possible, though very unlikely, for period
+ // to be 0 if the monotonic clock did not advance at all or if
+ // we somehow collected two metrics profiles closer together
+ // than the clock can measure. If the period is negative, this
+ // might be a Go runtime bug, since time.Time.Sub is supposed to
+ // work with monotonic time. Either way, bail out since
+ // something is probably going wrong
+ return fmt.Errorf(
+ "unexpected duration %v between metrics collections, first at %v, second at %v",
+ period, m.collectedAt, now,
+ )
}
previousStats := m.snapshot
@@ -74,34 +80,31 @@ func (m *metrics) report(now time.Time, buf *bytes.Buffer) error {
points := m.compute(&previousStats, &m.snapshot, period, now)
data, err := json.Marshal(removeInvalid(points))
-
if err != nil {
- // NB the minimum period check and removeInvalid ensures we don't hit this case
- return err
- }
-
- if _, err := buf.Write(data); err != nil {
+ // NB removeInvalid ensures we don't hit this case by dropping inf/NaN
return err
}
- return nil
+ _, err = buf.Write(data)
+ return err
}
func computeMetrics(prev *metricsSnapshot, curr *metricsSnapshot, period time.Duration, now time.Time) []point {
+ periodSeconds := float64(period) / float64(time.Second)
return []point{
- {metric: "go_alloc_bytes_per_sec", value: rate(curr.TotalAlloc, prev.TotalAlloc, period/time.Second)},
- {metric: "go_allocs_per_sec", value: rate(curr.Mallocs, prev.Mallocs, period/time.Second)},
- {metric: "go_frees_per_sec", value: rate(curr.Frees, prev.Frees, period/time.Second)},
- {metric: "go_heap_growth_bytes_per_sec", value: rate(curr.HeapAlloc, prev.HeapAlloc, period/time.Second)},
- {metric: "go_gcs_per_sec", value: rate(uint64(curr.NumGC), uint64(prev.NumGC), period/time.Second)},
- {metric: "go_gc_pause_time", value: rate(curr.PauseTotalNs, prev.PauseTotalNs, period)}, // % of time spent paused
+ {metric: "go_alloc_bytes_per_sec", value: rate(curr.TotalAlloc, prev.TotalAlloc, periodSeconds)},
+ {metric: "go_allocs_per_sec", value: rate(curr.Mallocs, prev.Mallocs, periodSeconds)},
+ {metric: "go_frees_per_sec", value: rate(curr.Frees, prev.Frees, periodSeconds)},
+ {metric: "go_heap_growth_bytes_per_sec", value: rate(curr.HeapAlloc, prev.HeapAlloc, periodSeconds)},
+ {metric: "go_gcs_per_sec", value: rate(uint64(curr.NumGC), uint64(prev.NumGC), periodSeconds)},
+ {metric: "go_gc_pause_time", value: rate(curr.PauseTotalNs, prev.PauseTotalNs, float64(period))}, // % of time spent paused
{metric: "go_max_gc_pause_time", value: float64(maxPauseNs(&curr.MemStats, now.Add(-period)))},
{metric: "go_num_goroutine", value: float64(curr.NumGoroutine)},
}
}
-func rate(curr, prev uint64, period time.Duration) float64 {
- return float64(int64(curr)-int64(prev)) / float64(period)
+func rate(curr, prev uint64, period float64) float64 {
+ return float64(int64(curr)-int64(prev)) / period
}
// maxPauseNs returns maximum pause time within the recent period, assumes stats populated at period end
diff --git a/profiler/metrics_test.go b/profiler/metrics_test.go
index 2c7e4cbe92..37d1d29d6c 100644
--- a/profiler/metrics_test.go
+++ b/profiler/metrics_test.go
@@ -143,22 +143,3 @@ func TestMetricsReport(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "[[\"metric_name\",1.1]]", buf.String())
}
-
-func TestMetricsCollectFrequency(t *testing.T) {
- now := now()
- var err error
- var buf bytes.Buffer
- m := newTestMetrics(now)
-
- err = m.report(now.Add(-time.Second), &buf)
- assert.Error(t, err, "collection call times must be monotonically increasing")
- assert.Empty(t, buf)
-
- err = m.report(now.Add(time.Second-1), &buf)
- assert.Error(t, err, "must be at least one second between collection calls")
- assert.Empty(t, buf)
-
- err = m.report(now.Add(time.Second), &buf)
- assert.NoError(t, err, "one second between calls should work")
- assert.NotEmpty(t, buf)
-}
diff --git a/profiler/profile.go b/profiler/profile.go
index 2aa0257f81..ee6d1825be 100644
--- a/profiler/profile.go
+++ b/profiler/profile.go
@@ -177,8 +177,11 @@ var profileTypes = map[ProfileType]profileType{
Filename: "metrics.json",
Collect: func(p *profiler) ([]byte, error) {
var buf bytes.Buffer
- p.interruptibleSleep(p.cfg.period)
+ interrupted := p.interruptibleSleep(p.cfg.period)
err := p.met.report(now(), &buf)
+ if err != nil && interrupted {
+ err = errProfilerStopped
+ }
return buf.Bytes(), err
},
},
diff --git a/profiler/profiler.go b/profiler/profiler.go
index 5e1d1736e8..0dba2062b8 100644
--- a/profiler/profiler.go
+++ b/profiler/profiler.go
@@ -36,6 +36,10 @@ var (
activeProfiler *profiler
containerID = internal.ContainerID() // replaced in tests
entityID = internal.EntityID() // replaced in tests
+
+ // errProfilerStopped is a sentinel for suppressng errors if we are
+ // about to stop the profiler
+ errProfilerStopped = errors.New("profiler stopped")
)
// Start starts the profiler. If the profiler is already running, it will be
@@ -343,9 +347,12 @@ func (p *profiler) collect(ticker <-chan time.Time) {
}
profs, err := p.runProfile(t)
if err != nil {
- log.Error("Error getting %s profile: %v; skipping.", t, err)
- tags := append(p.cfg.tags.Slice(), t.Tag())
- p.cfg.statsd.Count("datadog.profiling.go.collect_error", 1, tags, 1)
+ if err != errProfilerStopped {
+ log.Error("Error getting %s profile: %v; skipping.", t, err)
+ tags := append(p.cfg.tags.Slice(), t.Tag())
+ p.cfg.statsd.Count("datadog.profiling.go.collect_error", 1, tags, 1)
+ }
+ return
}
mu.Lock()
defer mu.Unlock()
@@ -480,10 +487,13 @@ func (p *profiler) outputDir(bat batch) error {
// interruptibleSleep sleeps for the given duration or until interrupted by the
// p.exit channel being closed.
-func (p *profiler) interruptibleSleep(d time.Duration) {
+// Returns whether the sleep was interrupted
+func (p *profiler) interruptibleSleep(d time.Duration) bool {
select {
case <-p.exit:
+ return true
case <-time.After(d):
+ return false
}
}
diff --git a/profiler/profiler_test.go b/profiler/profiler_test.go
index 844078948b..3d65fd8d58 100644
--- a/profiler/profiler_test.go
+++ b/profiler/profiler_test.go
@@ -30,6 +30,7 @@ import (
"gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig"
"gopkg.in/DataDog/dd-trace-go.v1/internal/httpmem"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion"
"gopkg.in/DataDog/dd-trace-go.v1/internal/traceprof"
"gopkg.in/DataDog/dd-trace-go.v1/internal/version"
@@ -377,6 +378,7 @@ func TestAllUploaded(t *testing.T) {
"delta-mutex.pprof",
"goroutines.pprof",
"goroutineswait.pprof",
+ "metrics.json",
}
if executionTraceEnabledDefault {
expected = append(expected, "go.trace")
@@ -748,3 +750,58 @@ func TestUDSDefault(t *testing.T) {
<-profiles
}
+
+func TestOrchestrionProfileInfo(t *testing.T) {
+ testCases := []struct {
+ env string
+ want string
+ }{
+ {want: "manual"},
+ {env: "1", want: "manual"},
+ {env: "true", want: "manual"},
+ {env: "auto", want: "auto"},
+ }
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("env=\"%s\"", tc.env), func(t *testing.T) {
+ t.Setenv("DD_PROFILING_ENABLED", tc.env)
+ p := doOneShortProfileUpload(t)
+ info := p.event.Info.Profiler
+ t.Logf("%+v", info)
+ if got := info.Activation; got != tc.want {
+ t.Errorf("wanted profiler activation \"%s\", got %s", tc.want, got)
+ }
+ want := "none"
+ if orchestrion.Enabled() {
+ want = "orchestrion"
+ }
+ if got := info.SSI.Mechanism; got != want {
+ t.Errorf("wanted profiler injected = %v, got %v", want, got)
+ }
+ })
+ }
+}
+
+func TestShortMetricsProfile(t *testing.T) {
+ profiles := startTestProfiler(t, 1, WithPeriod(10*time.Millisecond), WithProfileTypes(MetricsProfile))
+ for range 3 {
+ p := <-profiles
+ if _, ok := p.attachments["metrics.json"]; !ok {
+ t.Errorf("didn't get metrics profile, got %v", p.event.Attachments)
+ }
+ }
+}
+
+func TestMetricsProfileStopEarlyNoLog(t *testing.T) {
+ rl := new(log.RecordLogger)
+ defer log.UseLogger(rl)()
+ startTestProfiler(t, 1, WithPeriod(2*time.Second), WithProfileTypes(MetricsProfile))
+ // Stop the profiler immediately
+ Stop()
+ log.Flush()
+ for _, msg := range rl.Logs() {
+ // We should not see any error about stopping the metrics profile short
+ if strings.Contains(msg, "ERROR:") {
+ t.Errorf("unexpected error log: %s", msg)
+ }
+ }
+}
diff --git a/profiler/upload.go b/profiler/upload.go
index 29160ee040..a8b98f6560 100644
--- a/profiler/upload.go
+++ b/profiler/upload.go
@@ -16,10 +16,12 @@ import (
"mime/multipart"
"net/http"
"net/textproto"
+ "os"
"strings"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/internal/log"
+ "gopkg.in/DataDog/dd-trace-go.v1/internal/orchestrion"
)
// maxRetries specifies the maximum number of retries to have when an error occurs.
@@ -144,6 +146,20 @@ type uploadEvent struct {
Version string `json:"version"`
EndpointCounts map[string]uint64 `json:"endpoint_counts,omitempty"`
CustomAttributes []string `json:"custom_attributes,omitempty"`
+ Info struct {
+ Profiler profilerInfo `json:"profiler"`
+ } `json:"info"`
+}
+
+// profilerInfo holds profiler-specific information which should be attached to
+// the event for backend consumption
+type profilerInfo struct {
+ SSI struct {
+ Mechanism string `json:"mechanism,omitempty"`
+ } `json:"ssi"`
+ // Activation distinguishes how the profiler was enabled, either "auto"
+ // (env var set via admission controller) or "manual"
+ Activation string `json:"activation"`
}
// encode encodes the profile as a multipart mime request.
@@ -167,6 +183,22 @@ func encode(bat batch, tags []string) (contentType string, body io.Reader, err e
CustomAttributes: bat.customAttributes,
}
+ // DD_PROFILING_ENABLED is only used to enable profiling when added with
+ // Orchestrion. The "auto" value comes from the Datadog Kubernetes
+ // admission controller. Otherwise, the client library doesn't care
+ // about the value and assumes it was something "truthy", or this code
+ // wouldn't run. We just track it to be consistent with other languages
+ if os.Getenv("DD_PROFILING_ENABLED") == "auto" {
+ event.Info.Profiler.Activation = "auto"
+ } else {
+ event.Info.Profiler.Activation = "manual"
+ }
+ if orchestrion.Enabled() {
+ event.Info.Profiler.SSI.Mechanism = "orchestrion"
+ } else {
+ event.Info.Profiler.SSI.Mechanism = "none"
+ }
+
for _, p := range bat.profiles {
event.Attachments = append(event.Attachments, p.name)
f, err := mw.CreateFormFile(p.name, p.name)