diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3c4a198ec..a23c35c03 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -28,6 +28,15 @@ updates: schedule: interval: weekly day: sunday + - package-ecosystem: docker + directory: /examples/kafka-go + labels: + - dependencies + - docker + - Skip Changelog + schedule: + interval: weekly + day: sunday - package-ecosystem: docker directory: /examples/rolldice labels: @@ -64,6 +73,15 @@ updates: schedule: interval: weekly day: sunday + - package-ecosystem: docker + directory: /internal/test/e2e/kafka-go + labels: + - dependencies + - docker + - Skip Changelog + schedule: + interval: weekly + day: sunday - package-ecosystem: docker directory: /internal/test/e2e/nethttp labels: @@ -109,6 +127,15 @@ updates: schedule: interval: weekly day: sunday + - package-ecosystem: gomod + directory: /examples/kafka-go + labels: + - dependencies + - go + - Skip Changelog + schedule: + interval: weekly + day: sunday - package-ecosystem: gomod directory: /examples/rolldice labels: diff --git a/.github/workflows/e2e/k8s/sample-job.yml b/.github/workflows/e2e/k8s/sample-job.yml index f241f5bb2..610d66d0c 100644 --- a/.github/workflows/e2e/k8s/sample-job.yml +++ b/.github/workflows/e2e/k8s/sample-job.yml @@ -22,7 +22,7 @@ spec: imagePullPolicy: IfNotPresent command: ["/bin/sh", "-c"] # send SIGTERM to otel-go-instrumentation once the sample app has generated data so the job completes. - args: ["/sample-app/main && kill -TERM $(pidof otel-go-instrumentation)"] + args: ["(/sample-app/start.sh || /sample-app/main) && kill -TERM $(pidof otel-go-instrumentation)"] - name: auto-instrumentation image: otel-go-instrumentation imagePullPolicy: IfNotPresent diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml index d28a0f049..5a85b126d 100644 --- a/.github/workflows/kind.yml +++ b/.github/workflows/kind.yml @@ -13,7 +13,7 @@ jobs: strategy: matrix: k8s-version: ["v1.26.0"] - library: ["nethttp", "nethttp_custom", "gin", "databasesql", "grpc", "otelglobal"] + library: ["nethttp", "nethttp_custom", "gin", "databasesql", "grpc", "otelglobal", "kafka-go"] runs-on: ubuntu-latest steps: - name: Checkout Repo @@ -59,7 +59,12 @@ jobs: repository: 'open-telemetry/opentelemetry-helm-charts' path: opentelemetry-helm-charts - name: Helm install collector - run: helm install test -f .github/workflows/e2e/k8s/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector + run: | + if [ -f ./internal/test/e2e/${{ matrix.library }}/collector-helm-values.yml ]; then + helm install test -f ./internal/test/e2e/${{ matrix.library }}/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector + else + helm install test -f .github/workflows/e2e/k8s/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector + fi - name: check collector status # The loop is needed if the pod is not created yet # once https://github.com/kubernetes/kubectl/issues/1516 is fixed we can remove the loop diff --git a/CHANGELOG.md b/CHANGELOG.md index 4460ee43a..503c04ee5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,12 @@ OpenTelemetry Go Automatic Instrumentation adheres to [Semantic Versioning](http ## [Unreleased] +### Added + +- kafka-go instrumentation ([#709](https://github.com/open-telemetry/opentelemetry-go-instrumentation/pull/709)) + ### Fixed + - Change HTTP client span name to `{http.request.method}` ([#775](https://github.com/open-telemetry/opentelemetry-go-instrumentation/pull/775)) 
diff --git a/Makefile b/Makefile
index 6baeecd6d..d37132b34 100644
--- a/Makefile
+++ b/Makefile
@@ -151,13 +151,14 @@ license-header-check:
 		exit 1; \
 	fi
 
-.PHONY: fixture-nethttp fixture-gin fixture-databasesql fixture-nethttp-custom fixture-otelglobal
+.PHONY: fixture-nethttp fixture-gin fixture-databasesql fixture-nethttp-custom fixture-otelglobal fixture-kafka-go
 fixture-nethttp-custom: fixtures/nethttp_custom
 fixture-nethttp: fixtures/nethttp
 fixture-gin: fixtures/gin
 fixture-databasesql: fixtures/databasesql
 fixture-grpc: fixtures/grpc
 fixture-otelglobal: fixtures/otelglobal
+fixture-kafka-go: fixtures/kafka-go
 fixtures/%: LIBRARY=$*
 fixtures/%:
 	$(MAKE) docker-build
@@ -168,7 +169,15 @@ fixtures/%:
 	if [ ! -d "opentelemetry-helm-charts" ]; then \
 		git clone https://github.com/open-telemetry/opentelemetry-helm-charts.git; \
 	fi
-	helm install test -f .github/workflows/e2e/k8s/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector
+	if [ -f ./internal/test/e2e/$(LIBRARY)/collector-helm-values.yml ]; then \
+		helm install test -f ./internal/test/e2e/$(LIBRARY)/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector; \
+	else \
+		helm install test -f .github/workflows/e2e/k8s/collector-helm-values.yml opentelemetry-helm-charts/charts/opentelemetry-collector; \
+	fi
+	while : ; do \
+		kubectl get pod/test-opentelemetry-collector-0 && break; \
+		sleep 5; \
+	done
 	kubectl wait --for=condition=Ready --timeout=60s pod/test-opentelemetry-collector-0
 	kubectl -n default create -f .github/workflows/e2e/k8s/sample-job.yml
 	if kubectl wait --for=condition=Complete --timeout=60s job/sample-job; then \
@@ -178,7 +187,7 @@ fixtures/%:
 	else \
 		kubectl logs -l app=sample -c auto-instrumentation; \
 	fi
-	kind delete cluster
+	kind delete cluster 
 
 .PHONY: prerelease
 prerelease: | $(MULTIMOD)
diff --git a/examples/kafka-go/Dockerfile b/examples/kafka-go/Dockerfile
new file mode 100644
index 000000000..8806d1ed5
--- /dev/null
+++ b/examples/kafka-go/Dockerfile
@@ -0,0 +1,5 @@
+FROM golang:1.22.0
+WORKDIR /app
+COPY . .
+RUN go build -o main
+ENTRYPOINT ["/app/main"]
diff --git a/examples/kafka-go/README.md b/examples/kafka-go/README.md
new file mode 100644
index 000000000..9d43a2225
--- /dev/null
+++ b/examples/kafka-go/README.md
@@ -0,0 +1,24 @@
+# Example of auto-instrumentation of an HTTP server + Kafka producer + Kafka consumer + manual span
+
+This example shows a trace being generated that is composed of an HTTP server handler which produces
+a batch of 2 messages to different Kafka topics, and a single consumer consuming one of these messages.
+The consumer generates a manual span for each message it handles; this span is visible as a child of the consumer span.
+
+To run the example, bring up the services using the following command.
+
+```
+docker compose up
+```
+
+After the services are up, you can hit the server using the following command:
+```
+curl localhost:8080/produce
+```
+This will produce a batch of 2 messages.
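+
+The message value is taken from the HTTP request body, so you can also send a custom payload
+(the payload below is only an illustrative string; any request body works):
+```
+curl -X POST -d "hello kafka" localhost:8080/produce
+```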
+Every hit to the server should generate a trace that we can observe in [Jaeger UI](http://localhost:16686/) diff --git a/examples/kafka-go/docker-compose.yml b/examples/kafka-go/docker-compose.yml new file mode 100644 index 000000000..48bca9399 --- /dev/null +++ b/examples/kafka-go/docker-compose.yml @@ -0,0 +1,104 @@ +version: "3.9" + +networks: + default: + name: roll + driver: bridge + +volumes: + debugfs: + driver: local + driver_opts: + type: debugfs + device: debugfs + o: uid=0,gid=0,mode=755 + +services: + kafka: + depends_on: + - zookeeper + image: wurstmeister/kafka:2.12-2.3.1 + restart: on-failure:3 + links: + - zookeeper + expose: + - 9092 + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_VERSION: '2.3.1' + KAFKA_BROKER_ID: '1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'kafka' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092,SASL_PLAINTEXT://kafka:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' + KAFKA_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer' + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram + healthcheck: + test: ["CMD", "kafka-topics.sh", "--list", "--zookeeper", "zookeeper:2181"] + interval: 5s + timeout: 10s + retries: 5 + + zookeeper: + image: wurstmeister/zookeeper + expose: + - "2181" + ports: + - 2181:2181 + + kafkaapp: + depends_on: + kafka: + condition: service_healthy + build: + context: . + dockerfile: ./Dockerfile + pid: "host" + ports: + - "8080:8080" + volumes: + - /proc:/host/proc + + go-auto: + depends_on: + - kafkaapp + build: + context: ../.. 
+ dockerfile: Dockerfile + privileged: true + pid: "host" + environment: + - OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318 + - OTEL_GO_AUTO_TARGET_EXE=/app/main + - OTEL_SERVICE_NAME=kafkaapp + - OTEL_PROPAGATORS=tracecontext,baggage + - OTEL_GO_AUTO_SHOW_VERIFIER_LOG=true + volumes: + - /proc:/host/proc + - debugfs:/sys/kernel/debug + command: ["/otel-go-instrumentation", "-global-impl"] + + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" + - "14268:14268" + environment: + - COLLECTOR_OTLP_ENABLED=true + - LOG_LEVEL=debug + deploy: + resources: + limits: + memory: 300M + restart: unless-stopped diff --git a/examples/kafka-go/go.mod b/examples/kafka-go/go.mod new file mode 100644 index 000000000..6c5fda136 --- /dev/null +++ b/examples/kafka-go/go.mod @@ -0,0 +1,17 @@ +module go.opentelemetry.io/auto/examples/kafka-go + +go 1.22.0 + +require ( + github.com/segmentio/kafka-go v0.4.47 + go.opentelemetry.io/otel v1.25.0 +) + +require ( + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/klauspost/compress v1.15.9 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect + go.opentelemetry.io/otel/metric v1.25.0 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect +) diff --git a/examples/kafka-go/go.sum b/examples/kafka-go/go.sum new file mode 100644 index 000000000..071e75ce2 --- /dev/null +++ b/examples/kafka-go/go.sum @@ -0,0 +1,82 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= +github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= +go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= +go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= +go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= +go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= +go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/examples/kafka-go/main.go b/examples/kafka-go/main.go new file mode 100644 index 000000000..05a593261 --- /dev/null +++ b/examples/kafka-go/main.go @@ -0,0 +1,145 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "fmt" + "io" + "log" + "net/http" + "time" + + kafka "github.com/segmentio/kafka-go" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) + +var tracer = otel.Tracer("trace-example") + +type server struct { + kafkaWriter *kafka.Writer +} + +func (s *server) producerHandler(wrt http.ResponseWriter, req *http.Request) { + body, err := io.ReadAll(req.Body) + if err != nil { + fmt.Printf("failed to read request body: %v\n", err) + wrt.WriteHeader(http.StatusBadRequest) + return + } + msg1 := kafka.Message{ + Key: []byte("key1"), + Value: body, + Topic: "topic1", + Headers: []kafka.Header{ + { + Key: "header1", + Value: []byte("value1"), + }, + }, + } + msg2 := kafka.Message{ + Key: []byte("key2"), + Topic: "topic2", + Value: body, + } + msgs := []kafka.Message{msg1, msg2} + err = s.kafkaWriter.WriteMessages(req.Context(), + msgs..., + ) + + if err != nil { + _, err1 := wrt.Write([]byte(err.Error())) + if err1 != nil { + fmt.Printf("failed to write response: %v\n", err1) + wrt.WriteHeader(http.StatusInternalServerError) + } + return + } + + fmt.Fprintf(wrt, "message sent to kafka") +} + +func getKafkaWriter() *kafka.Writer { + return &kafka.Writer{ + Addr: kafka.TCP("kafka:9092"), + Balancer: &kafka.LeastBytes{}, + RequiredAcks: 1, + Async: true, + WriteBackoffMax: 1 * time.Millisecond, + BatchTimeout: 1 * time.Millisecond, + } +} + +func getKafkaReader() *kafka.Reader { + return kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"kafka:9092"}, + GroupID: "some group id", + Topic: "topic1", + ReadBatchTimeout: 1 * time.Millisecond, + }) +} + +func reader() { + reader := getKafkaReader() + + defer reader.Close() + ctx := context.Background() + + fmt.Println("start consuming ... !!") + for { + m, err := reader.ReadMessage(ctx) + if err != nil { + fmt.Printf("failed to read message: %v\n", err) + continue + } + _, span := tracer.Start(ctx, "consumer manual span") + span.SetAttributes( + attribute.String("topic", m.Topic), + attribute.Int64("partition", int64(m.Partition)), + attribute.Int64("offset", int64(m.Offset)), + ) + fmt.Printf("consumed message at topic:%v partition:%v offset:%v %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value)) + span.End() + } +} + +func main() { + kafkaWriter := getKafkaWriter() + defer kafkaWriter.Close() + + _, err := kafka.DialLeader(context.Background(), "tcp", "kafka:9092", "topic1", 0) + if err != nil { + panic(err.Error()) + } + + _, err = kafka.DialLeader(context.Background(), "tcp", "kafka:9092", "topic2", 0) + if err != nil { + panic(err.Error()) + } + + time.Sleep(5 * time.Second) + go reader() + + s := &server{kafkaWriter: kafkaWriter} + + // Add handle func for producer. + http.HandleFunc("/produce", s.producerHandler) + + // Run the web server. + fmt.Println("start producer-api ... 
!!") + log.Fatal(http.ListenAndServe(":8080", nil)) +} diff --git a/go.mod b/go.mod index 9062c458c..5a33f15da 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/mattn/go-sqlite3 v1.14.22 github.com/pkg/errors v0.9.1 + github.com/segmentio/kafka-go v0.4.47 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/exporters/autoexport v0.50.0 go.opentelemetry.io/otel v1.25.0 @@ -55,12 +56,14 @@ require ( github.com/goccy/go-json v0.10.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.9 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.19.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect diff --git a/go.sum b/go.sum index 60cb3d7eb..670bba1d4 100644 --- a/go.sum +++ b/go.sum @@ -49,6 +49,8 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= @@ -69,6 +71,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -83,6 +87,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= +github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -99,6 +105,13 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/contrib/exporters/autoexport v0.50.0 h1:1s7IQX8U74fRaZ5NwZPUlEVt9/RPNk+rGGoVcRZ3VR4= go.opentelemetry.io/contrib/exporters/autoexport v0.50.0/go.mod h1:f2RaTqHsQ4kC+NshKWWxlPP88QcIjDDpWso6m8VUTl8= go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= @@ -138,18 +151,57 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI= golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= diff --git a/internal/include/go_types.h b/internal/include/go_types.h index 486dfc4fd..45861a245 100644 --- a/internal/include/go_types.h +++ b/internal/include/go_types.h @@ -36,13 +36,6 @@ struct go_slice s64 cap; }; -struct 
go_slice_user_ptr -{ - void *array; - void *len; - void *cap; -}; - struct go_iface { void *tab; @@ -100,25 +93,30 @@ static __always_inline struct go_string write_user_go_string(char *str, u32 len) return new_string; } -static __always_inline void append_item_to_slice(struct go_slice *slice, void *new_item, u32 item_size, struct go_slice_user_ptr *slice_user_ptr) +static __always_inline void append_item_to_slice(void *new_item, u32 item_size, void *slice_user_ptr) { - u64 slice_len = slice->len; - u64 slice_cap = slice->cap; - if (slice_len < slice_cap) + // read the slice descriptor + struct go_slice slice = {0}; + bpf_probe_read(&slice, sizeof(slice), slice_user_ptr); + long res = 0; + + u64 slice_len = slice.len; + u64 slice_cap = slice.cap; + if (slice_len < slice_cap && slice.array != NULL) { - // Room available on current array - bpf_probe_write_user(slice->array + (item_size * slice_len), new_item, item_size); + // Room available on current array, append to the underlying array + res = bpf_probe_write_user(slice.array + (item_size * slice_len), new_item, item_size); } else { - //No room on current array - try to copy new one of size item_size * (len + 1) + // No room on current array - try to copy new one of size item_size * (len + 1) u32 alloc_size = item_size * slice_len; if (alloc_size >= MAX_SLICE_ARRAY_SIZE) { return; } - // Get buffer + // Get temporary buffer u32 index = 0; struct slice_array_buff *map_buff = bpf_map_lookup_elem(&slice_array_buff_map, &index); if (!map_buff) @@ -135,7 +133,10 @@ static __always_inline void append_item_to_slice(struct go_slice *slice, void *n return; } // Append to buffer - bpf_probe_read_user(new_slice_array, alloc_size, slice->array); + if (slice.array != NULL) { + bpf_probe_read_user(new_slice_array, alloc_size, slice.array); + bpf_printk("append_item_to_slice: copying %d bytes to new array from address 0x%llx", alloc_size, slice.array); + } copy_byte_arrays(new_item, new_slice_array + alloc_size, item_size); // Copy buffer to userspace @@ -149,30 +150,16 @@ static __always_inline void append_item_to_slice(struct go_slice *slice, void *n } // Update array pointer of slice - slice->array = new_array; - long success = bpf_probe_write_user(slice_user_ptr->array, &slice->array, sizeof(slice->array)); - if (success != 0) - { - bpf_printk("append_item_to_slice: failed to update array pointer in userspace"); - return; - } - - // Update cap - slice_cap++; - success = bpf_probe_write_user(slice_user_ptr->cap, &slice_cap, sizeof(slice_cap)); - if (success != 0) - { - bpf_printk("append_item_to_slice: failed to update cap in userspace"); - return; - } + slice.array = new_array; + slice.cap++; } // Update len - slice_len++; - long success = bpf_probe_write_user(slice_user_ptr->len, &slice_len, sizeof(slice_len)); + slice.len++; + long success = bpf_probe_write_user(slice_user_ptr, &slice, sizeof(slice)); if (success != 0) { - bpf_printk("append_item_to_slice: failed to update len in userspace"); + bpf_printk("append_item_to_slice: failed to update slice in userspace"); return; } } diff --git a/internal/include/span_context.h b/internal/include/span_context.h index 4395b020d..c43bd0bcd 100644 --- a/internal/include/span_context.h +++ b/internal/include/span_context.h @@ -18,6 +18,8 @@ #include "utils.h" #define SPAN_CONTEXT_STRING_SIZE 55 +#define W3C_KEY_LENGTH 11 // length of the "traceparent" key +#define W3C_VAL_LENGTH 55 struct span_context { diff --git a/internal/pkg/inject/offset_results.json b/internal/pkg/inject/offset_results.json index 
df439c7ab..209cf2171 100644 --- a/internal/pkg/inject/offset_results.json +++ b/internal/pkg/inject/offset_results.json @@ -1,4 +1,825 @@ [ + { + "module": "github.com/segmentio/kafka-go", + "packages": [ + { + "package": "github.com/segmentio/kafka-go", + "structs": [ + { + "struct": "Conn", + "fields": [ + { + "field": "clientID", + "offsets": [ + { + "offset": 328, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0" + ] + }, + { + "offset": 360, + "versions": [ + "0.3.1", + "0.3.2", + "0.3.3" + ] + }, + { + "offset": 368, + "versions": [ + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + } + ] + }, + { + "struct": "Message", + "fields": [ + { + "field": "Headers", + "offsets": [ + { + "offset": 80, + "versions": [ + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10" + ] + }, + { + "offset": 88, + "versions": [ + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + }, + { + "field": "Key", + "offsets": [ + { + "offset": 32, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10" + ] + }, + { + "offset": 40, + "versions": [ + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + }, + { + "field": "Offset", + "offsets": [ + { + "offset": 24, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + 
"0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + }, + { + "field": "Partition", + "offsets": [ + { + "offset": 16, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + }, + { + "field": "Time", + "offsets": [ + { + "offset": 80, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2" + ] + }, + { + "offset": 104, + "versions": [ + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10" + ] + }, + { + "offset": 112, + "versions": [ + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38" + ] + }, + { + "offset": 128, + "versions": [ + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + }, + { + "field": "Topic", + "offsets": [ + { + "offset": 0, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + } + ] + }, + { + "struct": "Reader", + "fields": [ + { + "field": "config", + "offsets": [ + { + "offset": 0, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", 
+ "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + } + ] + }, + { + "struct": "ReaderConfig", + "fields": [ + { + "field": "GroupID", + "offsets": [ + { + "offset": 24, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0", + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + } + ] + } + ] + }, + { + "struct": "Writer", + "fields": [ + { + "field": "Topic", + "offsets": [ + { + "offset": 16, + "versions": [ + "0.4.1", + "0.4.2", + "0.4.3", + "0.4.4", + "0.4.5", + "0.4.6", + "0.4.7", + "0.4.8", + "0.4.9", + "0.4.10", + "0.4.11", + "0.4.12", + "0.4.13", + "0.4.14", + "0.4.15", + "0.4.16", + "0.4.17", + "0.4.18", + "0.4.19", + "0.4.20", + "0.4.21", + "0.4.22", + "0.4.23", + "0.4.24", + "0.4.25", + "0.4.26", + "0.4.27", + "0.4.28", + "0.4.29", + "0.4.30", + "0.4.31", + "0.4.32-msk-iam", + "0.4.32", + "0.4.33", + "0.4.34", + "0.4.35", + "0.4.38", + "0.4.39", + "0.4.40", + "0.4.41", + "0.4.42", + "0.4.43", + "0.4.44", + "0.4.45", + "0.4.46", + "0.4.47" + ] + }, + { + "offset": 24, + "versions": [ + "0.1.0", + "0.2.0", + "0.2.1", + "0.2.2", + "0.2.3", + "0.2.4", + "0.2.5", + "0.3.0", + "0.3.1", + "0.3.2", + "0.3.3", + "0.3.4", + "0.3.5", + "0.3.6", + "0.3.7", + "0.3.8", + "0.3.9", + "0.3.10", + "0.4.0" + ] + } + ] + } + ] + } + ] + } + ] + }, { "module": "go.opentelemetry.io/otel", "packages": [ diff --git a/internal/pkg/instrumentation/bpf/database/sql/probe.go b/internal/pkg/instrumentation/bpf/database/sql/probe.go index 75da58f83..d6de1f9fa 100644 --- a/internal/pkg/instrumentation/bpf/database/sql/probe.go +++ b/internal/pkg/instrumentation/bpf/database/sql/probe.go @@ -148,7 +148,7 @@ type event struct { Query [256]byte } -func convertEvent(e *event) *probe.SpanEvent { +func convertEvent(e *event) []*probe.SpanEvent { query := unix.ByteSliceToString(e.Query[:]) sc := trace.NewSpanContext(trace.SpanContextConfig{ @@ -170,15 +170,17 @@ func convertEvent(e *event) *probe.SpanEvent { pscPtr = nil } - return &probe.SpanEvent{ - SpanName: "DB", - StartTime: int64(e.StartTime), - EndTime: int64(e.EndTime), - SpanContext: &sc, - Attributes: []attribute.KeyValue{ - semconv.DBStatementKey.String(query), + return []*probe.SpanEvent{ + { + SpanName: "DB", + StartTime: int64(e.StartTime), + EndTime: int64(e.EndTime), + SpanContext: &sc, + Attributes: []attribute.KeyValue{ + semconv.DBStatementKey.String(query), + }, + ParentSpanContext: pscPtr, }, - ParentSpanContext: pscPtr, } } diff --git a/internal/pkg/instrumentation/bpf/database/sql/probe_test.go 
b/internal/pkg/instrumentation/bpf/database/sql/probe_test.go index ce660ff55..c03dced14 100644 --- a/internal/pkg/instrumentation/bpf/database/sql/probe_test.go +++ b/internal/pkg/instrumentation/bpf/database/sql/probe_test.go @@ -59,5 +59,5 @@ func TestProbeConvertEvent(t *testing.T) { semconv.DBStatementKey.String("SELECT * FROM foo"), }, } - assert.Equal(t, want, got) + assert.Equal(t, want, got[0]) } diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf/probe.bpf.c new file mode 100644 index 000000000..f515d7434 --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf/probe.bpf.c @@ -0,0 +1,235 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "arguments.h" +#include "span_context.h" +#include "go_context.h" +#include "go_types.h" +#include "uprobe.h" + +char __license[] SEC("license") = "Dual MIT/GPL"; + +#define MAX_CONCURRENT 50 +// https://github.com/apache/kafka/blob/0.10.2/core/src/main/scala/kafka/common/Topic.scala#L30C3-L30C34 +#define MAX_TOPIC_SIZE 256 +// No constraint on the key size, but we must have a limit for the verifier +#define MAX_KEY_SIZE 256 +#define MAX_CONSUMER_GROUP_SIZE 128 + +struct kafka_request_t { + BASE_SPAN_PROPERTIES + char topic[MAX_TOPIC_SIZE]; + char key[MAX_KEY_SIZE]; + char consumer_group[MAX_CONSUMER_GROUP_SIZE]; + s64 offset; + s64 partition; +}__attribute__((packed)); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, void*); + __type(value, struct kafka_request_t); + __uint(max_entries, MAX_CONCURRENT); +} kafka_events SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, void*); + __type(value, void*); + __uint(max_entries, MAX_CONCURRENT); +} goroutine_to_go_context SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, void*); + __type(value, void*); + __uint(max_entries, MAX_CONCURRENT); +} kafka_reader_to_conn SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct kafka_request_t)); + __uint(max_entries, 1); +} kafka_request_storage_map SEC(".maps"); + +struct +{ + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(struct span_context)); + __uint(max_entries, 1); +} parent_span_context_storage_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); +} events SEC(".maps"); + +// https://github.com/segmentio/kafka-go/blob/main/protocol/record.go#L48 +struct kafka_header_t { + struct go_string key; + struct go_slice value; +}; + +// Injected in init +volatile const u64 message_key_pos; +volatile const u64 message_topic_pos; +volatile const u64 message_headers_pos; +volatile const u64 message_partition_pos; +volatile const u64 message_offset_pos; + +volatile const u64 reader_config_pos; +volatile const u64 
reader_config_group_id_pos;
+
+#define MAX_HEADERS 20
+
+static __always_inline struct span_context *extract_span_context_from_headers(void *message) {
+    // Read the headers slice descriptor
+    void *headers = (void *)(message + message_headers_pos);
+    struct go_slice headers_slice = {0};
+    bpf_probe_read(&headers_slice, sizeof(headers_slice), headers);
+
+    char key[W3C_KEY_LENGTH] = "traceparent";
+    char current_key[W3C_KEY_LENGTH];
+
+    u32 map_id = 0;
+    struct span_context *parent_span_context = bpf_map_lookup_elem(&parent_span_context_storage_map, &map_id);
+    if (!parent_span_context)
+    {
+        return NULL;
+    }
+
+    for (u64 i = 0; i < headers_slice.len; i++) {
+        if (i >= MAX_HEADERS) {
+            break;
+        }
+        // Read the header
+        struct kafka_header_t header = {0};
+        bpf_probe_read(&header, sizeof(header), headers_slice.array + (i * sizeof(header)));
+        // Check if it is the traceparent header
+        if (header.key.len == W3C_KEY_LENGTH && header.value.len == W3C_VAL_LENGTH) {
+            bpf_probe_read_user(current_key, sizeof(current_key), header.key.str);
+            if (bpf_memcmp(key, current_key, sizeof(key))) {
+                // Found the traceparent header, extract the span context
+                char val[W3C_VAL_LENGTH];
+                bpf_probe_read(val, W3C_VAL_LENGTH, header.value.array);
+                w3c_string_to_span_context(val, parent_span_context);
+                return parent_span_context;
+            }
+        }
+    }
+
+    return NULL;
+}
+
+// This instrumentation attaches a uprobe to the following function:
+// func (r *Reader) FetchMessage(ctx context.Context) (Message, error)
+SEC("uprobe/FetchMessage")
+int uprobe_FetchMessage(struct pt_regs *ctx) {
+    /* FetchMessage is a blocking function, hence its execution time is not a good indication of the time it took to handle the message.
+    Instead, we use the entry to this function to end the span which was started when its last call returned. (A typical consumer calls FetchMessage in a loop.)
+    A less confusing way of looking at it is as follows:
+    1. Entry to FetchMessage
+    2. Internal kafka-go code before blocking
+    3. Blocking wait for a message
+    4. Internal kafka-go code after blocking
+    5. Return from FetchMessage
+    Steps 2-4 are executed in a separate goroutine from the one running the library user's code. As a result, the span we create for a message covers the user's handling of it, from the FetchMessage return that delivered it (step 5) until the next FetchMessage entry (step 1), rather than the blocking wait itself.
+    */
+    void *reader = get_argument(ctx, 1);
+    void *context_data_ptr = get_Go_context(ctx, 3, 0, true);
+    void *goroutine = (void *)GOROUTINE(ctx);
+    struct kafka_request_t *kafka_request = bpf_map_lookup_elem(&kafka_events, &goroutine);
+    if (kafka_request == NULL)
+    {
+        // The current goroutine has no kafka request,
+        // this can happen the first time FetchMessage is called.
+        // Save the context for the return probe for in-process context propagation
+        goto save_context;
+    }
+
+    get_go_string_from_user_ptr((void *)(reader + reader_config_pos + reader_config_group_id_pos), kafka_request->consumer_group, sizeof(kafka_request->consumer_group));
+    kafka_request->end_time = bpf_ktime_get_ns();
+
+    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kafka_request, sizeof(*kafka_request));
+    stop_tracking_span(&kafka_request->sc, &kafka_request->psc);
+    bpf_map_delete_elem(&kafka_events, &goroutine);
+
+save_context:
+    // Save the context for the return probe
+    bpf_map_update_elem(&goroutine_to_go_context, &goroutine, &context_data_ptr, 0);
+    return 0;
+}
+
+// This instrumentation attaches a uprobe to the return of the following function:
+// func (r *Reader) FetchMessage(ctx context.Context) (Message, error)
+SEC("uprobe/FetchMessage")
+int uprobe_FetchMessage_Returns(struct pt_regs *ctx) {
+    /* The FetchMessage function returns a message to the user after reading it from a channel.
+    The user consuming this message will handle it after this probe,
+    thus it is a good place to start tracking the span that corresponds to this message. In addition, we save the span data
+    in a hash map to be read by the entry probe of FetchMessage, which will end this span */
+    void *goroutine = (void *)GOROUTINE(ctx);
+    u32 map_id = 0;
+    struct kafka_request_t *kafka_request = bpf_map_lookup_elem(&kafka_request_storage_map, &map_id);
+    if (kafka_request == NULL)
+    {
+        bpf_printk("uprobe/FetchMessage_Returns: kafka_request is NULL");
+        return 0;
+    }
+    kafka_request->start_time = bpf_ktime_get_ns();
+    // The message is returned on the stack, since it is returned as a struct and not a pointer
+    void *message = (void *)(PT_REGS_SP(ctx) + 8);
+
+    // Get the parent span context from the message headers
+    struct span_context *parent_span_ctx = extract_span_context_from_headers(message);
+    if (parent_span_ctx != NULL) {
+        // Set the parent context
+        bpf_probe_read(&kafka_request->psc, sizeof(kafka_request->psc), parent_span_ctx);
+        copy_byte_arrays(kafka_request->psc.TraceID, kafka_request->sc.TraceID, TRACE_ID_SIZE);
+        generate_random_bytes(kafka_request->sc.SpanID, SPAN_ID_SIZE);
+    } else {
+        kafka_request->sc = generate_span_context();
+    }
+
+    // Collecting message attributes
+    // topic
+    get_go_string_from_user_ptr((void *)(message + message_topic_pos), kafka_request->topic, sizeof(kafka_request->topic));
+    // partition
+    bpf_probe_read(&kafka_request->partition, sizeof(kafka_request->partition), (void *)(message + message_partition_pos));
+    // offset
+    bpf_probe_read(&kafka_request->offset, sizeof(kafka_request->offset), (void *)(message + message_offset_pos));
+    // Key is a byte slice, first read the slice descriptor
+    struct go_slice key_slice = {0};
+    bpf_probe_read(&key_slice, sizeof(key_slice), (void *)(message + message_key_pos));
+    u64 size_to_read = key_slice.len > MAX_KEY_SIZE ?
MAX_KEY_SIZE : key_slice.len;
+    size_to_read &= 0xFF;
+    // Then read the actual key
+    bpf_probe_read(kafka_request->key, size_to_read, key_slice.array);
+
+    bpf_map_update_elem(&kafka_events, &goroutine, kafka_request, 0);
+
+    // We start tracking the consumer span in the return probe,
+    // hence we can't read Go's context directly from the registers as we usually do.
+    // Using the goroutine address as a key to the map that contains the context.
+    void *context_data_ptr = bpf_map_lookup_elem(&goroutine_to_go_context, &goroutine);
+    if (context_data_ptr != NULL) {
+        bpf_probe_read_kernel(&context_data_ptr, sizeof(context_data_ptr), context_data_ptr);
+        start_tracking_span(context_data_ptr, &kafka_request->sc);
+        bpf_map_delete_elem(&goroutine_to_go_context, &goroutine);
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_arm64.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_arm64.go
new file mode 100644
index 000000000..9ced3bd23
--- /dev/null
+++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_arm64.go
@@ -0,0 +1,168 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build arm64
+
+package consumer
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"io"
+
+	"github.com/cilium/ebpf"
+)
+
+type bpfKafkaRequestT struct {
+	StartTime     uint64
+	EndTime       uint64
+	Sc            bpfSpanContext
+	Psc           bpfSpanContext
+	Topic         [256]int8
+	Key           [256]int8
+	ConsumerGroup [128]int8
+	Offset        int64
+	Partition     int64
+}
+
+type bpfSliceArrayBuff struct{ Buff [1024]uint8 }
+
+type bpfSpanContext struct {
+	TraceID [16]uint8
+	SpanID  [8]uint8
+}
+
+// loadBpf returns the embedded CollectionSpec for bpf.
+func loadBpf() (*ebpf.CollectionSpec, error) {
+	reader := bytes.NewReader(_BpfBytes)
+	spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+	if err != nil {
+		return nil, fmt.Errorf("can't load bpf: %w", err)
+	}
+
+	return spec, err
+}
+
+// loadBpfObjects loads bpf and converts it into a struct.
+//
+// The following types are suitable as obj argument:
+//
+//	*bpfObjects
+//	*bpfPrograms
+//	*bpfMaps
+//
+// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
+func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
+	spec, err := loadBpf()
+	if err != nil {
+		return err
+	}
+
+	return spec.LoadAndAssign(obj, opts)
+}
+
+// bpfSpecs contains maps and programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type bpfSpecs struct {
+	bpfProgramSpecs
+	bpfMapSpecs
+}
+
+// bpfProgramSpecs contains programs before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type bpfProgramSpecs struct {
+	UprobeFetchMessage        *ebpf.ProgramSpec `ebpf:"uprobe_FetchMessage"`
+	UprobeFetchMessageReturns *ebpf.ProgramSpec `ebpf:"uprobe_FetchMessage_Returns"`
+}
+
+// bpfMapSpecs contains maps before they are loaded into the kernel.
+//
+// It can be passed ebpf.CollectionSpec.Assign.
+type bpfMapSpecs struct { + AllocMap *ebpf.MapSpec `ebpf:"alloc_map"` + Events *ebpf.MapSpec `ebpf:"events"` + GoroutineToGoContext *ebpf.MapSpec `ebpf:"goroutine_to_go_context"` + KafkaEvents *ebpf.MapSpec `ebpf:"kafka_events"` + KafkaReaderToConn *ebpf.MapSpec `ebpf:"kafka_reader_to_conn"` + KafkaRequestStorageMap *ebpf.MapSpec `ebpf:"kafka_request_storage_map"` + ParentSpanContextStorageMap *ebpf.MapSpec `ebpf:"parent_span_context_storage_map"` + SliceArrayBuffMap *ebpf.MapSpec `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.MapSpec `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.MapSpec `ebpf:"tracked_spans_by_sc"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfMaps struct { + AllocMap *ebpf.Map `ebpf:"alloc_map"` + Events *ebpf.Map `ebpf:"events"` + GoroutineToGoContext *ebpf.Map `ebpf:"goroutine_to_go_context"` + KafkaEvents *ebpf.Map `ebpf:"kafka_events"` + KafkaReaderToConn *ebpf.Map `ebpf:"kafka_reader_to_conn"` + KafkaRequestStorageMap *ebpf.Map `ebpf:"kafka_request_storage_map"` + ParentSpanContextStorageMap *ebpf.Map `ebpf:"parent_span_context_storage_map"` + SliceArrayBuffMap *ebpf.Map `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.Map `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.Map `ebpf:"tracked_spans_by_sc"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.AllocMap, + m.Events, + m.GoroutineToGoContext, + m.KafkaEvents, + m.KafkaReaderToConn, + m.KafkaRequestStorageMap, + m.ParentSpanContextStorageMap, + m.SliceArrayBuffMap, + m.TrackedSpans, + m.TrackedSpansBySc, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfPrograms struct { + UprobeFetchMessage *ebpf.Program `ebpf:"uprobe_FetchMessage"` + UprobeFetchMessageReturns *ebpf.Program `ebpf:"uprobe_FetchMessage_Returns"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeFetchMessage, + p.UprobeFetchMessageReturns, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_bpfel_arm64.o +var _BpfBytes []byte diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_x86.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_x86.go new file mode 100644 index 000000000..3c08c04c5 --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/bpf_bpfel_x86.go @@ -0,0 +1,168 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package consumer + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpfKafkaRequestT struct { + StartTime uint64 + EndTime uint64 + Sc bpfSpanContext + Psc bpfSpanContext + Topic [256]int8 + Key [256]int8 + ConsumerGroup [128]int8 + Offset int64 + Partition int64 +} + +type bpfSliceArrayBuff struct{ Buff [1024]uint8 } + +type bpfSpanContext struct { + TraceID [16]uint8 + SpanID [8]uint8 +} + +// loadBpf returns the embedded CollectionSpec for bpf. +func loadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf: %w", err) + } + + return spec, err +} + +// loadBpfObjects loads bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpfObjects +// *bpfPrograms +// *bpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfSpecs struct { + bpfProgramSpecs + bpfMapSpecs +} + +// bpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfProgramSpecs struct { + UprobeFetchMessage *ebpf.ProgramSpec `ebpf:"uprobe_FetchMessage"` + UprobeFetchMessageReturns *ebpf.ProgramSpec `ebpf:"uprobe_FetchMessage_Returns"` +} + +// bpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfMapSpecs struct { + AllocMap *ebpf.MapSpec `ebpf:"alloc_map"` + Events *ebpf.MapSpec `ebpf:"events"` + GoroutineToGoContext *ebpf.MapSpec `ebpf:"goroutine_to_go_context"` + KafkaEvents *ebpf.MapSpec `ebpf:"kafka_events"` + KafkaReaderToConn *ebpf.MapSpec `ebpf:"kafka_reader_to_conn"` + KafkaRequestStorageMap *ebpf.MapSpec `ebpf:"kafka_request_storage_map"` + ParentSpanContextStorageMap *ebpf.MapSpec `ebpf:"parent_span_context_storage_map"` + SliceArrayBuffMap *ebpf.MapSpec `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.MapSpec `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.MapSpec `ebpf:"tracked_spans_by_sc"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpfMaps struct { + AllocMap *ebpf.Map `ebpf:"alloc_map"` + Events *ebpf.Map `ebpf:"events"` + GoroutineToGoContext *ebpf.Map `ebpf:"goroutine_to_go_context"` + KafkaEvents *ebpf.Map `ebpf:"kafka_events"` + KafkaReaderToConn *ebpf.Map `ebpf:"kafka_reader_to_conn"` + KafkaRequestStorageMap *ebpf.Map `ebpf:"kafka_request_storage_map"` + ParentSpanContextStorageMap *ebpf.Map `ebpf:"parent_span_context_storage_map"` + SliceArrayBuffMap *ebpf.Map `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.Map `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.Map `ebpf:"tracked_spans_by_sc"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.AllocMap, + m.Events, + m.GoroutineToGoContext, + m.KafkaEvents, + m.KafkaReaderToConn, + m.KafkaRequestStorageMap, + m.ParentSpanContextStorageMap, + m.SliceArrayBuffMap, + m.TrackedSpans, + m.TrackedSpansBySc, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfPrograms struct { + UprobeFetchMessage *ebpf.Program `ebpf:"uprobe_FetchMessage"` + UprobeFetchMessageReturns *ebpf.Program `ebpf:"uprobe_FetchMessage_Returns"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeFetchMessage, + p.UprobeFetchMessageReturns, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_bpfel_x86.o +var _BpfBytes []byte diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe.go new file mode 100644 index 000000000..f7de027ab --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package consumer + +import ( + "fmt" + "os" + + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/go-logr/logr" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sys/unix" + + "go.opentelemetry.io/auto/internal/pkg/instrumentation/context" + "go.opentelemetry.io/auto/internal/pkg/instrumentation/probe" + "go.opentelemetry.io/auto/internal/pkg/process" + "go.opentelemetry.io/auto/internal/pkg/structfield" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target amd64,arm64 -cc clang -cflags $CFLAGS bpf ./bpf/probe.bpf.c + +const ( + // pkg is the package being instrumented. + pkg = "github.com/segmentio/kafka-go" +) + +// New returns a new [probe.Probe]. 
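+//
+// As an illustrative sketch only (not part of this package's API), the shape
+// of application code this consumer probe observes mirrors the e2e sample app;
+// the broker address and group id below are hypothetical:
+//
+//	r := kafka.NewReader(kafka.ReaderConfig{
+//		Brokers: []string{"127.0.0.1:9092"},
+//		GroupID: "my-group",
+//		Topic:   "topic1",
+//	})
+//	for {
+//		msg, err := r.FetchMessage(context.Background())
+//		if err != nil {
+//			break
+//		}
+//		process(msg) // hypothetical handler; the consumer span covers this work
+//	}
+//
+// The return uprobe on FetchMessage starts the span for the fetched message,
+// and the next entry uprobe ends it, so each span brackets the user's handling
+// of one message.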
+func New(logger logr.Logger) probe.Probe { + id := probe.ID{ + SpanKind: trace.SpanKindConsumer, + InstrumentedPkg: pkg, + } + return &probe.Base[bpfObjects, event]{ + ID: id, + Logger: logger.WithName(id.String()), + Consts: []probe.Const{ + probe.RegistersABIConst{}, + probe.AllocationConst{}, + probe.StructFieldConst{ + Key: "message_headers_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Headers"), + }, + probe.StructFieldConst{ + Key: "message_key_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Key"), + }, + probe.StructFieldConst{ + Key: "message_topic_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Topic"), + }, + probe.StructFieldConst{ + Key: "message_partition_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Partition"), + }, + probe.StructFieldConst{ + Key: "message_offset_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Offset"), + }, + probe.StructFieldConst{ + Key: "reader_config_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Reader", "config"), + }, + probe.StructFieldConst{ + Key: "reader_config_group_id_pos", + Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "ReaderConfig", "GroupID"), + }, + }, + Uprobes: []probe.Uprobe[bpfObjects]{ + { + Sym: "github.com/segmentio/kafka-go.(*Reader).FetchMessage", + Fn: uprobeFetchMessage, + }, + }, + ReaderFn: func(obj bpfObjects) (*perf.Reader, error) { + return perf.NewReader(obj.Events, os.Getpagesize()*100) + }, + SpecFn: loadBpf, + ProcessFn: convertEvent, + } +} + +func uprobeFetchMessage(name string, exec *link.Executable, target *process.TargetDetails, obj *bpfObjects) ([]link.Link, error) { + offset, err := target.GetFunctionOffset(name) + if err != nil { + return nil, err + } + + opts := &link.UprobeOptions{Address: offset} + l, err := exec.Uprobe("", obj.UprobeFetchMessage, opts) + if err != nil { + return nil, err + } + + links := []link.Link{l} + + retOffsets, err := target.GetFunctionReturns(name) + if err != nil { + return nil, err + } + + for _, ret := range retOffsets { + opts := &link.UprobeOptions{Address: ret} + l, err := exec.Uprobe("", obj.UprobeFetchMessageReturns, opts) + if err != nil { + return nil, err + } + links = append(links, l) + } + + return links, nil +} + +// event represents a kafka message received by the consumer. 
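+// Its memory layout must match the C struct kafka_request_t in
+// bpf/probe.bpf.c, from which perf records are decoded.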
+type event struct { + context.BaseSpanProperties + Topic [256]byte + Key [256]byte + ConsumerGroup [128]byte + Offset int64 + Partition int64 +} + +func convertEvent(e *event) []*probe.SpanEvent { + sc := trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: e.SpanContext.TraceID, + SpanID: e.SpanContext.SpanID, + TraceFlags: trace.FlagsSampled, + }) + + var pscPtr *trace.SpanContext + if e.ParentSpanContext.TraceID.IsValid() { + psc := trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: e.ParentSpanContext.TraceID, + SpanID: e.ParentSpanContext.SpanID, + TraceFlags: trace.FlagsSampled, + Remote: true, + }) + pscPtr = &psc + } else { + pscPtr = nil + } + + topic := unix.ByteSliceToString(e.Topic[:]) + + attributes := []attribute.KeyValue{ + semconv.MessagingSystemKafka, + semconv.MessagingOperationReceive, + semconv.MessagingKafkaDestinationPartition(int(e.Partition)), + semconv.MessagingDestinationName(topic), + semconv.MessagingKafkaMessageOffset(int(e.Offset)), + semconv.MessagingKafkaMessageKey(unix.ByteSliceToString(e.Key[:])), + semconv.MessagingKafkaConsumerGroup(unix.ByteSliceToString(e.ConsumerGroup[:])), + } + return []*probe.SpanEvent{ + { + SpanName: kafkaConsumerSpanName(topic), + StartTime: int64(e.StartTime), + EndTime: int64(e.EndTime), + SpanContext: &sc, + ParentSpanContext: pscPtr, + Attributes: attributes, + }, + } +} + +func kafkaConsumerSpanName(topic string) string { + return fmt.Sprintf("%s receive", topic) +} diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe_test.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe_test.go new file mode 100644 index 000000000..169a8e11a --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer/probe_test.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package consumer + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/auto/internal/pkg/instrumentation/context" + "go.opentelemetry.io/auto/internal/pkg/instrumentation/probe" +) + +func TestProbeConvertEvent(t *testing.T) { + start := time.Now() + end := start.Add(1 * time.Second) + + traceID := trace.TraceID{1} + spanID := trace.SpanID{1} + + got := convertEvent(&event{ + BaseSpanProperties: context.BaseSpanProperties{ + StartTime: uint64(start.UnixNano()), + EndTime: uint64(end.UnixNano()), + SpanContext: context.EBPFSpanContext{TraceID: traceID, SpanID: spanID}, + }, + // topic1 + Topic: [256]byte{0x74, 0x6f, 0x70, 0x69, 0x63, 0x31}, + // key1 + Key: [256]byte{0x6b, 0x65, 0x79, 0x31}, + // test consumer group + ConsumerGroup: [128]byte{0x74, 0x65, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70}, + Offset: 42, + Partition: 12, + }) + + sc := trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: traceID, + SpanID: spanID, + TraceFlags: trace.FlagsSampled, + }) + want := &probe.SpanEvent{ + SpanName: kafkaConsumerSpanName("topic1"), + StartTime: int64(start.UnixNano()), + EndTime: int64(end.UnixNano()), + SpanContext: &sc, + Attributes: []attribute.KeyValue{ + semconv.MessagingSystemKafka, + semconv.MessagingOperationReceive, + semconv.MessagingKafkaDestinationPartition(12), + semconv.MessagingDestinationName("topic1"), + semconv.MessagingKafkaMessageOffset(42), + semconv.MessagingKafkaMessageKey("key1"), + semconv.MessagingKafkaConsumerGroup("test consumer group"), + }, + } + assert.Equal(t, want, got[0]) +} diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf/probe.bpf.c new file mode 100644 index 000000000..62d6bf493 --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf/probe.bpf.c @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
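+
+// Propagation note: build_context_header below encodes the span context as a
+// W3C Trace Context "traceparent" header (an 11-byte key and a 55-byte value
+// of the form "00-<trace-id>-<span-id>-<trace-flags>") and appends it to each
+// outgoing message's Headers slice via inject_kafka_header.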
+
+#include "arguments.h"
+#include "span_context.h"
+#include "go_context.h"
+#include "go_types.h"
+#include "uprobe.h"
+
+char __license[] SEC("license") = "Dual MIT/GPL";
+
+#define MAX_CONCURRENT 50
+// https://github.com/segmentio/kafka-go/blob/main/writer.go#L118
+// TODO: this value directly impacts the map sizes as well as the verification complexity.
+// limitation on map entry size: https://github.com/iovisor/bcc/issues/2519#issuecomment-534359316
+// kafka-go's default batch size is 100, but it can be changed by the user;
+// we must specify a limit for the verifier
+#define MAX_BATCH_SIZE 10
+// https://github.com/apache/kafka/blob/0.10.2/core/src/main/scala/kafka/common/Topic.scala#L30C3-L30C34
+#define MAX_TOPIC_SIZE 256
+// No constraint on the key size, but we must have a limit for the verifier
+#define MAX_KEY_SIZE 256
+
+struct message_attributes_t {
+    unsigned char SpanID[SPAN_ID_SIZE];
+    char topic[MAX_TOPIC_SIZE];
+    char key[MAX_KEY_SIZE];
+};
+
+struct kafka_request_t {
+    // common attributes to all the produced messages
+    u64 start_time;
+    u64 end_time;
+    struct span_context psc;
+    unsigned char TraceID[TRACE_ID_SIZE];
+    // attributes per message
+    struct message_attributes_t msgs[MAX_BATCH_SIZE];
+    char global_topic[MAX_TOPIC_SIZE];
+    u64 valid_messages;
+}__attribute__((packed));
+
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __type(key, void*);
+    __type(value, struct kafka_request_t);
+    __uint(max_entries, MAX_CONCURRENT);
+} kafka_events SEC(".maps");
+
+struct
+{
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __uint(key_size, sizeof(u32));
+    __uint(value_size, sizeof(struct kafka_request_t));
+    __uint(max_entries, 1);
+} kafka_request_storage_map SEC(".maps");
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+} events SEC(".maps");
+
+// https://github.com/segmentio/kafka-go/blob/main/protocol/record.go#L48
+struct kafka_header_t {
+    struct go_string key;
+    struct go_slice value;
+};
+
+// Injected in init
+volatile const u64 message_key_pos;
+volatile const u64 message_topic_pos;
+volatile const u64 message_headers_pos;
+volatile const u64 message_time_pos;
+
+volatile const u64 writer_topic_pos;
+
+static __always_inline int build_context_header(struct kafka_header_t *header, struct span_context *span_ctx) {
+    if (header == NULL || span_ctx == NULL) {
+        bpf_printk("build_context_header: Invalid arguments");
+        return -1;
+    }
+
+    // Prepare the key string for the user
+    char key[W3C_KEY_LENGTH] = "traceparent";
+    void *ptr = write_target_data(key, W3C_KEY_LENGTH);
+    if (ptr == NULL) {
+        bpf_printk("build_context_header: Failed to write key to user");
+        return -1;
+    }
+
+    // build the go string of the key
+    header->key.str = ptr;
+    header->key.len = W3C_KEY_LENGTH;
+
+    // Prepare the value string for the user
+    char val[W3C_VAL_LENGTH];
+    span_context_to_w3c_string(span_ctx, val);
+    ptr = write_target_data(val, sizeof(val));
+    if (ptr == NULL) {
+        bpf_printk("build_context_header: Failed to write value to user");
+        return -1;
+    }
+
+    // build the go slice of the value
+    header->value.array = ptr;
+    header->value.len = W3C_VAL_LENGTH;
+    header->value.cap = W3C_VAL_LENGTH;
+    bpf_printk("build_context_header success");
+    return 0;
+}
+
+static __always_inline int inject_kafka_header(void *message, struct kafka_header_t *header) {
+    append_item_to_slice(header, sizeof(*header), (void *)(message + message_headers_pos));
+    return 0;
+}
+
+static __always_inline long collect_kafka_attributes(void *message, struct message_attributes_t *attrs, bool collect_topic) {
+    if (collect_topic) {
+        // The topic might be set globally for a writer, or per message
+        get_go_string_from_user_ptr((void *)(message + message_topic_pos), attrs->topic, sizeof(attrs->topic));
+    }
+
+    // Key is a byte slice, first read the slice descriptor
+    struct go_slice key_slice = {0};
+    bpf_probe_read(&key_slice, sizeof(key_slice), (void *)(message + message_key_pos));
+    u64 size_to_read = key_slice.len > MAX_KEY_SIZE ? MAX_KEY_SIZE : key_slice.len;
+    size_to_read &= 0xFF;
+    // Then read the actual key
+    return bpf_probe_read(attrs->key, size_to_read, key_slice.array);
+}
+
+// This instrumentation attaches uprobe to the following function:
+// func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error
+SEC("uprobe/WriteMessages")
+int uprobe_WriteMessages(struct pt_regs *ctx) {
+    // In Go, "..." is equivalent to passing a slice: https://go.dev/ref/spec#Passing_arguments_to_..._parameters
+    void *writer = get_argument(ctx, 1);
+    void *msgs_array = get_argument(ctx, 4);
+    u64 msgs_array_len = (u64)get_argument(ctx, 5);
+
+    // Get key
+    void *context_data_ptr = get_Go_context(ctx, 3, 0, true);
+    void *key = get_consistent_key(ctx, context_data_ptr);
+
+    void *kafka_request_ptr = bpf_map_lookup_elem(&kafka_events, &key);
+    if (kafka_request_ptr != NULL)
+    {
+        bpf_printk("uprobe/WriteMessages already tracked with the current context");
+        return 0;
+    }
+
+    u32 map_id = 0;
+    struct kafka_request_t *kafka_request = bpf_map_lookup_elem(&kafka_request_storage_map, &map_id);
+    if (kafka_request == NULL)
+    {
+        bpf_printk("uprobe/WriteMessages: kafka_request is NULL");
+        return 0;
+    }
+    kafka_request->start_time = bpf_ktime_get_ns();
+
+    // Get the parent if it exists
+    struct span_context *parent_span_ctx = get_parent_span_context(context_data_ptr);
+    if (parent_span_ctx != NULL) {
+        // Set the parent context
+        bpf_probe_read(&kafka_request->psc, sizeof(kafka_request->psc), parent_span_ctx);
+        copy_byte_arrays(kafka_request->psc.TraceID, kafka_request->TraceID, TRACE_ID_SIZE);
+    } else {
+        generate_random_bytes(kafka_request->TraceID, TRACE_ID_SIZE);
+    }
+
+    // Try to get a global topic from the Writer
+    bool global_topic = get_go_string_from_user_ptr((void *)(writer + writer_topic_pos), kafka_request->global_topic, sizeof(kafka_request->global_topic));
+
+    void *msg_ptr = msgs_array;
+    struct kafka_header_t header = {0};
+    struct span_context current_sc = {0};
+    // This is a hack to get the message size. The calculation is based on the following assumptions:
+    // 1. "Time" is the last field in the message struct. This looks to be correct for all the versions according to
+    //    https://github.com/segmentio/kafka-go/blob/v0.2.3/message.go#L24C2-L24C6
+    // 2. the time.Time struct is 24 bytes. This looks to be correct for all reasonably recent versions according to
+    //    https://github.com/golang/go/blame/master/src/time/time.go#L135
+    // In the future, if more libraries need struct sizes, we will probably want a mechanism
+    // similar to the one we have for the offsets
+    u16 msg_size = message_time_pos + 8 + 8 + 8;
+    __builtin_memcpy(current_sc.TraceID, kafka_request->TraceID, TRACE_ID_SIZE);
+    kafka_request->valid_messages = 0;
+    // Iterate over the messages
+    for (u64 i = 0; i < MAX_BATCH_SIZE; i++) {
+        if (i >= msgs_array_len) {
+            break;
+        }
+        // Optionally collect the topic, and always collect the key
+        collect_kafka_attributes(msg_ptr, &kafka_request->msgs[i], !global_topic);
+        // Generate a span id for each message
+        generate_random_bytes(kafka_request->msgs[i].SpanID, SPAN_ID_SIZE);
+        __builtin_memcpy(current_sc.SpanID, kafka_request->msgs[i].SpanID, SPAN_ID_SIZE);
+        // Build the header
+        if (build_context_header(&header, &current_sc) != 0) {
+            bpf_printk("uprobe/WriteMessages: Failed to build header");
+            return 0;
+        }
+        // Inject the header
+        inject_kafka_header(msg_ptr, &header);
+        kafka_request->valid_messages++;
+        msg_ptr = msg_ptr + msg_size;
+    }
+
+    bpf_map_update_elem(&kafka_events, &key, kafka_request, 0);
+    // no need to start tracking the span, as we don't have a context to propagate locally
+    return 0;
+}
+
+// This instrumentation attaches uprobe to the following function:
+// func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error
+SEC("uprobe/WriteMessages")
+int uprobe_WriteMessages_Returns(struct pt_regs *ctx) {
+    u64 end_time = bpf_ktime_get_ns();
+    void *context_data_ptr = get_Go_context(ctx, 3, 0, true);
+    void *key = get_consistent_key(ctx, context_data_ptr);
+
+    struct kafka_request_t *kafka_request = bpf_map_lookup_elem(&kafka_events, &key);
+    if (kafka_request == NULL) {
+        bpf_printk("kafka_request is null\n");
+        return 0;
+    }
+    kafka_request->end_time = end_time;
+
+    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kafka_request, sizeof(*kafka_request));
+    bpf_map_delete_elem(&kafka_events, &key);
+    // no need to stop tracking the span, as we don't have a context to propagate locally
+    return 0;
+}
\ No newline at end of file
diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_arm64.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_arm64.go
new file mode 100644
index 000000000..c3a289f2c
--- /dev/null
+++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_arm64.go
@@ -0,0 +1,161 @@
+// Code generated by bpf2go; DO NOT EDIT.
+//go:build arm64
+
+package producer
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"io"
+
+	"github.com/cilium/ebpf"
+)
+
+type bpfKafkaRequestT struct {
+	StartTime uint64
+	EndTime   uint64
+	Psc       bpfSpanContext
+	TraceID   [16]uint8
+	Msgs      [10]struct {
+		SpanID [8]uint8
+		Topic  [256]int8
+		Key    [256]int8
+	}
+	GlobalTopic   [256]int8
+	ValidMessages uint64
+}
+
+type bpfSliceArrayBuff struct{ Buff [1024]uint8 }
+
+type bpfSpanContext struct {
+	TraceID [16]uint8
+	SpanID  [8]uint8
+}
+
+// loadBpf returns the embedded CollectionSpec for bpf.
+func loadBpf() (*ebpf.CollectionSpec, error) {
+	reader := bytes.NewReader(_BpfBytes)
+	spec, err := ebpf.LoadCollectionSpecFromReader(reader)
+	if err != nil {
+		return nil, fmt.Errorf("can't load bpf: %w", err)
+	}
+
+	return spec, err
+}
+
+// loadBpfObjects loads bpf and converts it into a struct.
+// +// The following types are suitable as obj argument: +// +// *bpfObjects +// *bpfPrograms +// *bpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfSpecs struct { + bpfProgramSpecs + bpfMapSpecs +} + +// bpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfProgramSpecs struct { + UprobeWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_WriteMessages"` + UprobeWriteMessagesReturns *ebpf.ProgramSpec `ebpf:"uprobe_WriteMessages_Returns"` +} + +// bpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfMapSpecs struct { + AllocMap *ebpf.MapSpec `ebpf:"alloc_map"` + Events *ebpf.MapSpec `ebpf:"events"` + KafkaEvents *ebpf.MapSpec `ebpf:"kafka_events"` + KafkaRequestStorageMap *ebpf.MapSpec `ebpf:"kafka_request_storage_map"` + SliceArrayBuffMap *ebpf.MapSpec `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.MapSpec `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.MapSpec `ebpf:"tracked_spans_by_sc"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfMaps struct { + AllocMap *ebpf.Map `ebpf:"alloc_map"` + Events *ebpf.Map `ebpf:"events"` + KafkaEvents *ebpf.Map `ebpf:"kafka_events"` + KafkaRequestStorageMap *ebpf.Map `ebpf:"kafka_request_storage_map"` + SliceArrayBuffMap *ebpf.Map `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.Map `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.Map `ebpf:"tracked_spans_by_sc"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.AllocMap, + m.Events, + m.KafkaEvents, + m.KafkaRequestStorageMap, + m.SliceArrayBuffMap, + m.TrackedSpans, + m.TrackedSpansBySc, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfPrograms struct { + UprobeWriteMessages *ebpf.Program `ebpf:"uprobe_WriteMessages"` + UprobeWriteMessagesReturns *ebpf.Program `ebpf:"uprobe_WriteMessages_Returns"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeWriteMessages, + p.UprobeWriteMessagesReturns, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. 
+// +//go:embed bpf_bpfel_arm64.o +var _BpfBytes []byte diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_x86.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_x86.go new file mode 100644 index 000000000..0bcb52edd --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/bpf_bpfel_x86.go @@ -0,0 +1,161 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build 386 || amd64 + +package producer + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type bpfKafkaRequestT struct { + StartTime uint64 + EndTime uint64 + Psc bpfSpanContext + TraceID [16]uint8 + Msgs [10]struct { + SpanID [8]uint8 + Topic [256]int8 + Key [256]int8 + } + GlobalTopic [256]int8 + ValidMessages uint64 +} + +type bpfSliceArrayBuff struct{ Buff [1024]uint8 } + +type bpfSpanContext struct { + TraceID [16]uint8 + SpanID [8]uint8 +} + +// loadBpf returns the embedded CollectionSpec for bpf. +func loadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load bpf: %w", err) + } + + return spec, err +} + +// loadBpfObjects loads bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *bpfObjects +// *bpfPrograms +// *bpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func loadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := loadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// bpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfSpecs struct { + bpfProgramSpecs + bpfMapSpecs +} + +// bpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfProgramSpecs struct { + UprobeWriteMessages *ebpf.ProgramSpec `ebpf:"uprobe_WriteMessages"` + UprobeWriteMessagesReturns *ebpf.ProgramSpec `ebpf:"uprobe_WriteMessages_Returns"` +} + +// bpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type bpfMapSpecs struct { + AllocMap *ebpf.MapSpec `ebpf:"alloc_map"` + Events *ebpf.MapSpec `ebpf:"events"` + KafkaEvents *ebpf.MapSpec `ebpf:"kafka_events"` + KafkaRequestStorageMap *ebpf.MapSpec `ebpf:"kafka_request_storage_map"` + SliceArrayBuffMap *ebpf.MapSpec `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.MapSpec `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.MapSpec `ebpf:"tracked_spans_by_sc"` +} + +// bpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfObjects struct { + bpfPrograms + bpfMaps +} + +func (o *bpfObjects) Close() error { + return _BpfClose( + &o.bpfPrograms, + &o.bpfMaps, + ) +} + +// bpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type bpfMaps struct { + AllocMap *ebpf.Map `ebpf:"alloc_map"` + Events *ebpf.Map `ebpf:"events"` + KafkaEvents *ebpf.Map `ebpf:"kafka_events"` + KafkaRequestStorageMap *ebpf.Map `ebpf:"kafka_request_storage_map"` + SliceArrayBuffMap *ebpf.Map `ebpf:"slice_array_buff_map"` + TrackedSpans *ebpf.Map `ebpf:"tracked_spans"` + TrackedSpansBySc *ebpf.Map `ebpf:"tracked_spans_by_sc"` +} + +func (m *bpfMaps) Close() error { + return _BpfClose( + m.AllocMap, + m.Events, + m.KafkaEvents, + m.KafkaRequestStorageMap, + m.SliceArrayBuffMap, + m.TrackedSpans, + m.TrackedSpansBySc, + ) +} + +// bpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type bpfPrograms struct { + UprobeWriteMessages *ebpf.Program `ebpf:"uprobe_WriteMessages"` + UprobeWriteMessagesReturns *ebpf.Program `ebpf:"uprobe_WriteMessages_Returns"` +} + +func (p *bpfPrograms) Close() error { + return _BpfClose( + p.UprobeWriteMessages, + p.UprobeWriteMessagesReturns, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_bpfel_x86.o +var _BpfBytes []byte diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe.go new file mode 100644 index 000000000..e2620d1e0 --- /dev/null +++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe.go @@ -0,0 +1,204 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package producer + +import ( + "fmt" + "os" + + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/go-logr/logr" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sys/unix" + + "go.opentelemetry.io/auto/internal/pkg/instrumentation/context" + "go.opentelemetry.io/auto/internal/pkg/instrumentation/probe" + "go.opentelemetry.io/auto/internal/pkg/process" + "go.opentelemetry.io/auto/internal/pkg/structfield" +) + +//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target amd64,arm64 -cc clang -cflags $CFLAGS bpf ./bpf/probe.bpf.c + +const ( + // pkg is the package being instrumented. + pkg = "github.com/segmentio/kafka-go" +) + +// New returns a new [probe.Probe]. 
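+//
+// As an illustrative sketch only, the application code this producer probe
+// observes is a kafka-go batch write; the broker address is hypothetical:
+//
+//	w := &kafka.Writer{Addr: kafka.TCP("127.0.0.1:9092")}
+//	err := w.WriteMessages(context.Background(),
+//		kafka.Message{Topic: "topic1", Key: []byte("k1"), Value: []byte("v1")},
+//		kafka.Message{Topic: "topic2", Key: []byte("k2"), Value: []byte("v2")},
+//	)
+//
+// The entry uprobe creates one span per message (all sharing a single trace
+// ID) and injects a "traceparent" header into each message before it is
+// written; the return uprobe records the end timestamp for the batch.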
+func New(logger logr.Logger) probe.Probe {
+	id := probe.ID{
+		SpanKind:        trace.SpanKindProducer,
+		InstrumentedPkg: pkg,
+	}
+	return &probe.Base[bpfObjects, event]{
+		ID:     id,
+		Logger: logger.WithName(id.String()),
+		Consts: []probe.Const{
+			probe.RegistersABIConst{},
+			probe.AllocationConst{},
+			probe.StructFieldConst{
+				Key: "writer_topic_pos",
+				Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Writer", "Topic"),
+			},
+			probe.StructFieldConst{
+				Key: "message_headers_pos",
+				Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Headers"),
+			},
+			probe.StructFieldConst{
+				Key: "message_key_pos",
+				Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Key"),
+			},
+			probe.StructFieldConst{
+				Key: "message_time_pos",
+				Val: structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Time"),
+			},
+		},
+		Uprobes: []probe.Uprobe[bpfObjects]{
+			{
+				Sym: "github.com/segmentio/kafka-go.(*Writer).WriteMessages",
+				Fn:  uprobeWriteMessages,
+			},
+		},
+		ReaderFn: func(obj bpfObjects) (*perf.Reader, error) {
+			return perf.NewReader(obj.Events, os.Getpagesize()*100)
+		},
+		SpecFn:    loadBpf,
+		ProcessFn: convertEvent,
+	}
+}
+
+func uprobeWriteMessages(name string, exec *link.Executable, target *process.TargetDetails, obj *bpfObjects) ([]link.Link, error) {
+	offset, err := target.GetFunctionOffset(name)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := &link.UprobeOptions{Address: offset}
+	l, err := exec.Uprobe("", obj.UprobeWriteMessages, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	links := []link.Link{l}
+
+	retOffsets, err := target.GetFunctionReturns(name)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, ret := range retOffsets {
+		opts := &link.UprobeOptions{Address: ret}
+		l, err := exec.Uprobe("", obj.UprobeWriteMessagesReturns, opts)
+		if err != nil {
+			return nil, err
+		}
+		links = append(links, l)
+	}
+
+	return links, nil
+}
+
+type messageAttributes struct {
+	SpanID trace.SpanID
+	Topic  [256]byte
+	Key    [256]byte
+}
+
+// event represents a batch of kafka messages being sent.
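+// Its memory layout must match the C struct kafka_request_t in
+// bpf/probe.bpf.c; only the first ValidMessages entries of Messages are
+// populated by the probe.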
+type event struct {
+	StartTime         uint64
+	EndTime           uint64
+	ParentSpanContext context.EBPFSpanContext
+	// The same trace id is shared by the whole batch
+	TraceID trace.TraceID
+	// Message specific attributes
+	Messages [10]messageAttributes
+	// Global topic for the batch
+	GlobalTopic [256]byte
+	// Number of valid messages in the batch
+	ValidMessages uint64
+}
+
+func convertEvent(e *event) []*probe.SpanEvent {
+	tsc := trace.SpanContextConfig{
+		TraceID:    e.TraceID,
+		TraceFlags: trace.FlagsSampled,
+	}
+
+	var pscPtr *trace.SpanContext
+	if e.ParentSpanContext.TraceID.IsValid() {
+		psc := trace.NewSpanContext(trace.SpanContextConfig{
+			TraceID:    e.ParentSpanContext.TraceID,
+			SpanID:     e.ParentSpanContext.SpanID,
+			TraceFlags: trace.FlagsSampled,
+			Remote:     true,
+		})
+		pscPtr = &psc
+	} else {
+		pscPtr = nil
+	}
+
+	globalTopic := unix.ByteSliceToString(e.GlobalTopic[:])
+
+	commonAttrs := []attribute.KeyValue{semconv.MessagingSystemKafka, semconv.MessagingOperationPublish}
+	if len(globalTopic) > 0 {
+		commonAttrs = append(commonAttrs, semconv.MessagingDestinationName(globalTopic))
+	}
+
+	if e.ValidMessages > 0 {
+		commonAttrs = append(commonAttrs, semconv.MessagingBatchMessageCount(int(e.ValidMessages)))
+	}
+
+	var res []*probe.SpanEvent
+	var msgTopic string
+	for i := uint64(0); i < e.ValidMessages; i++ {
+		tsc.SpanID = e.Messages[i].SpanID
+		sc := trace.NewSpanContext(tsc)
+		key := unix.ByteSliceToString(e.Messages[i].Key[:])
+
+		msgAttrs := []attribute.KeyValue{}
+		if len(key) > 0 {
+			msgAttrs = append(msgAttrs, semconv.MessagingKafkaMessageKey(key))
+		}
+
+		// The topic is either the global topic or the message specific topic
+		if len(globalTopic) == 0 {
+			msgTopic = unix.ByteSliceToString(e.Messages[i].Topic[:])
+		} else {
+			msgTopic = globalTopic
+		}
+
+		msgAttrs = append(msgAttrs, semconv.MessagingDestinationName(msgTopic))
+		msgAttrs = append(msgAttrs, commonAttrs...)
+
+		res = append(res, &probe.SpanEvent{
+			SpanName:          kafkaProducerSpanName(msgTopic),
+			StartTime:         int64(e.StartTime),
+			EndTime:           int64(e.EndTime),
+			SpanContext:       &sc,
+			Attributes:        msgAttrs,
+			ParentSpanContext: pscPtr,
+		})
+	}
+
+	return res
+}
+
+func kafkaProducerSpanName(topic string) string {
+	return fmt.Sprintf("%s publish", topic)
+}
diff --git a/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe_test.go b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe_test.go
new file mode 100644
index 000000000..ac43bf56c
--- /dev/null
+++ b/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer/probe_test.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package producer
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+	"go.opentelemetry.io/otel/trace"
+
+	"go.opentelemetry.io/auto/internal/pkg/instrumentation/probe"
+)
+
+func TestProbeConvertEvent(t *testing.T) {
+	start := time.Now()
+	end := start.Add(1 * time.Second)
+
+	traceID := trace.TraceID{1}
+
+	got := convertEvent(&event{
+		StartTime: uint64(start.UnixNano()),
+		EndTime:   uint64(end.UnixNano()),
+		TraceID:   traceID,
+		Messages: [10]messageAttributes{
+			{
+				// topic1
+				Topic: [256]byte{0x74, 0x6f, 0x70, 0x69, 0x63, 0x31},
+				// key1
+				Key:    [256]byte{0x6b, 0x65, 0x79, 0x31},
+				SpanID: trace.SpanID{1},
+			},
+			{
+				// topic2
+				Topic: [256]byte{0x74, 0x6f, 0x70, 0x69, 0x63, 0x32},
+				// key2
+				Key:    [256]byte{0x6b, 0x65, 0x79, 0x32},
+				SpanID: trace.SpanID{2},
+			},
+		},
+		ValidMessages: 2,
+	})
+
+	sc1 := trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID:    traceID,
+		SpanID:     trace.SpanID{1},
+		TraceFlags: trace.FlagsSampled,
+	})
+	sc2 := trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID:    traceID,
+		SpanID:     trace.SpanID{2},
+		TraceFlags: trace.FlagsSampled,
+	})
+	want1 := &probe.SpanEvent{
+		SpanName:    kafkaProducerSpanName("topic1"),
+		StartTime:   int64(start.UnixNano()),
+		EndTime:     int64(end.UnixNano()),
+		SpanContext: &sc1,
+		Attributes: []attribute.KeyValue{
+			semconv.MessagingKafkaMessageKey("key1"),
+			semconv.MessagingDestinationName("topic1"),
+			semconv.MessagingSystemKafka,
+			semconv.MessagingOperationPublish,
+			semconv.MessagingBatchMessageCount(2),
+		},
+	}
+
+	want2 := &probe.SpanEvent{
+		SpanName:    kafkaProducerSpanName("topic2"),
+		StartTime:   int64(start.UnixNano()),
+		EndTime:     int64(end.UnixNano()),
+		SpanContext: &sc2,
+		Attributes: []attribute.KeyValue{
+			semconv.MessagingKafkaMessageKey("key2"),
+			semconv.MessagingDestinationName("topic2"),
+			semconv.MessagingSystemKafka,
+			semconv.MessagingOperationPublish,
+			semconv.MessagingBatchMessageCount(2),
+		},
+	}
+	assert.Equal(t, want1, got[0])
+	assert.Equal(t, want2, got[1])
+}
diff --git
a/internal/pkg/instrumentation/bpf/go.opentelemetry.io/otel/traceglobal/probe_test.go b/internal/pkg/instrumentation/bpf/go.opentelemetry.io/otel/traceglobal/probe_test.go index 851972576..10d20f873 100644 --- a/internal/pkg/instrumentation/bpf/go.opentelemetry.io/otel/traceglobal/probe_test.go +++ b/internal/pkg/instrumentation/bpf/go.opentelemetry.io/otel/traceglobal/probe_test.go @@ -118,5 +118,5 @@ func TestProbeConvertEvent(t *testing.T) { attribute.String("string_key2", "string value 2"), }, } - assert.Equal(t, want, got) + assert.Equal(t, want, got[0]) } diff --git a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/bpf/probe.bpf.c index 570ddb049..48edcde0e 100644 --- a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/bpf/probe.bpf.c +++ b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/bpf/probe.bpf.c @@ -140,15 +140,6 @@ int uprobe_LoopyWriter_HeaderHandler(struct pt_regs *ctx) return 0; } - struct go_slice slice = {}; - struct go_slice_user_ptr slice_user_ptr = {}; - slice_user_ptr.array = (void *)(headerFrame_ptr + (headerFrame_hf_pos)); - slice_user_ptr.len = (void *)(headerFrame_ptr + (headerFrame_hf_pos + 8)); - slice_user_ptr.cap = (void *)(headerFrame_ptr + (headerFrame_hf_pos + 16)); - bpf_probe_read(&slice.array, sizeof(slice.array), slice_user_ptr.array); - bpf_probe_read(&slice.len, sizeof(slice.len), slice_user_ptr.len); - bpf_probe_read(&slice.cap, sizeof(slice.cap), slice_user_ptr.cap); - struct span_context current_span_context = {}; bpf_probe_read(¤t_span_context, sizeof(current_span_context), sc_ptr); @@ -170,7 +161,7 @@ int uprobe_LoopyWriter_HeaderHandler(struct pt_regs *ctx) struct hpack_header_field hf = {}; hf.name = key_str; hf.value = val_str; - append_item_to_slice(&slice, &hf, sizeof(hf), &slice_user_ptr); + append_item_to_slice(&hf, sizeof(hf), (void *)(headerFrame_ptr + (headerFrame_hf_pos))); done: bpf_map_delete_elem(&streamid_to_span_contexts, &stream_id); diff --git a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/probe.go b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/probe.go index c26624eb7..878ff5f76 100644 --- a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/probe.go +++ b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client/probe.go @@ -152,7 +152,7 @@ type event struct { } // According to https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md -func convertEvent(e *event) *probe.SpanEvent { +func convertEvent(e *event) []*probe.SpanEvent { method := unix.ByteSliceToString(e.Method[:]) target := unix.ByteSliceToString(e.Target[:]) var attrs []attribute.KeyValue @@ -188,12 +188,14 @@ func convertEvent(e *event) *probe.SpanEvent { pscPtr = nil } - return &probe.SpanEvent{ - SpanName: method, - StartTime: int64(e.StartTime), - EndTime: int64(e.EndTime), - Attributes: attrs, - SpanContext: &sc, - ParentSpanContext: pscPtr, + return []*probe.SpanEvent{ + { + SpanName: method, + StartTime: int64(e.StartTime), + EndTime: int64(e.EndTime), + Attributes: attrs, + SpanContext: &sc, + ParentSpanContext: pscPtr, + }, } } diff --git a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/bpf/probe.bpf.c index 8d6e0be79..0714b0ac4 100644 --- a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/bpf/probe.bpf.c +++ 
b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/bpf/probe.bpf.c @@ -24,8 +24,6 @@ char __license[] SEC("license") = "Dual MIT/GPL"; #define MAX_CONCURRENT 50 #define MAX_HEADERS 20 #define MAX_HEADER_STRING 50 -#define W3C_KEY_LENGTH 11 -#define W3C_VAL_LENGTH 55 struct grpc_request_t { diff --git a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/probe.go b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/probe.go index e3470bb5e..51dfae61b 100644 --- a/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/probe.go +++ b/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server/probe.go @@ -163,7 +163,7 @@ type event struct { Method [100]byte } -func convertEvent(e *event) *probe.SpanEvent { +func convertEvent(e *event) []*probe.SpanEvent { method := unix.ByteSliceToString(e.Method[:]) sc := trace.NewSpanContext(trace.SpanContextConfig{ @@ -185,15 +185,17 @@ func convertEvent(e *event) *probe.SpanEvent { pscPtr = nil } - return &probe.SpanEvent{ - SpanName: method, - StartTime: int64(e.StartTime), - EndTime: int64(e.EndTime), - Attributes: []attribute.KeyValue{ - semconv.RPCSystemKey.String("grpc"), - semconv.RPCServiceKey.String(method), + return []*probe.SpanEvent{ + { + SpanName: method, + StartTime: int64(e.StartTime), + EndTime: int64(e.EndTime), + Attributes: []attribute.KeyValue{ + semconv.RPCSystemKey.String("grpc"), + semconv.RPCServiceKey.String(method), + }, + ParentSpanContext: pscPtr, + SpanContext: &sc, }, - ParentSpanContext: pscPtr, - SpanContext: &sc, } } diff --git a/internal/pkg/instrumentation/bpf/net/http/client/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/net/http/client/bpf/probe.bpf.c index 18fa211ff..1c93be7fc 100644 --- a/internal/pkg/instrumentation/bpf/net/http/client/bpf/probe.bpf.c +++ b/internal/pkg/instrumentation/bpf/net/http/client/bpf/probe.bpf.c @@ -10,8 +10,6 @@ char __license[] SEC("license") = "Dual MIT/GPL"; #define MAX_PROTO_SIZE 8 #define MAX_PATH_SIZE 100 #define MAX_METHOD_SIZE 10 -#define W3C_KEY_LENGTH 11 -#define W3C_VAL_LENGTH 55 #define MAX_CONCURRENT 50 struct http_request_t { diff --git a/internal/pkg/instrumentation/bpf/net/http/client/probe.go b/internal/pkg/instrumentation/bpf/net/http/client/probe.go index c39fd3d7f..39033379b 100644 --- a/internal/pkg/instrumentation/bpf/net/http/client/probe.go +++ b/internal/pkg/instrumentation/bpf/net/http/client/probe.go @@ -156,7 +156,7 @@ type event struct { Path [100]byte } -func convertEvent(e *event) *probe.SpanEvent { +func convertEvent(e *event) []*probe.SpanEvent { method := unix.ByteSliceToString(e.Method[:]) path := unix.ByteSliceToString(e.Path[:]) @@ -202,12 +202,14 @@ func convertEvent(e *event) *probe.SpanEvent { } } - return &probe.SpanEvent{ - SpanName: method, - StartTime: int64(e.StartTime), - EndTime: int64(e.EndTime), - SpanContext: &sc, - Attributes: attrs, - ParentSpanContext: pscPtr, + return []*probe.SpanEvent{ + { + SpanName: method, + StartTime: int64(e.StartTime), + EndTime: int64(e.EndTime), + SpanContext: &sc, + Attributes: attrs, + ParentSpanContext: pscPtr, + }, } } diff --git a/internal/pkg/instrumentation/bpf/net/http/server/bpf/probe.bpf.c b/internal/pkg/instrumentation/bpf/net/http/server/bpf/probe.bpf.c index 549798c63..0b49617f8 100644 --- a/internal/pkg/instrumentation/bpf/net/http/server/bpf/probe.bpf.c +++ b/internal/pkg/instrumentation/bpf/net/http/server/bpf/probe.bpf.c @@ -24,8 +24,6 @@ char __license[] SEC("license") = "Dual MIT/GPL"; #define MAX_BUCKETS 8 #define METHOD_MAX_LEN 8 #define 
MAX_CONCURRENT 50
-#define W3C_KEY_LENGTH 11
-#define W3C_VAL_LENGTH 55
 #define REMOTE_ADDR_MAX_LEN 256
 #define HOST_MAX_LEN 256
 #define PROTO_MAX_LEN 8
diff --git a/internal/pkg/instrumentation/bpf/net/http/server/probe.go b/internal/pkg/instrumentation/bpf/net/http/server/probe.go
index 130f9cdf0..a5abc393b 100644
--- a/internal/pkg/instrumentation/bpf/net/http/server/probe.go
+++ b/internal/pkg/instrumentation/bpf/net/http/server/probe.go
@@ -153,7 +153,7 @@ type event struct {
     Proto [8]byte
 }
 
-func convertEvent(e *event) *probe.SpanEvent {
+func convertEvent(e *event) []*probe.SpanEvent {
     method := unix.ByteSliceToString(e.Method[:])
     path := unix.ByteSliceToString(e.Path[:])
     proto := unix.ByteSliceToString(e.Proto[:])
@@ -208,14 +208,16 @@ func convertEvent(e *event) *probe.SpanEvent {
         }
     }
 
-    return &probe.SpanEvent{
-        // Do not include the high-cardinality path here (there is no
-        // templatized path manifest to reference).
-        SpanName:          method,
-        StartTime:         int64(e.StartTime),
-        EndTime:           int64(e.EndTime),
-        SpanContext:       &sc,
-        ParentSpanContext: pscPtr,
-        Attributes:        attributes,
+    return []*probe.SpanEvent{
+        {
+            // Do not include the high-cardinality path here (there is no
+            // templatized path manifest to reference).
+            SpanName:          method,
+            StartTime:         int64(e.StartTime),
+            EndTime:           int64(e.EndTime),
+            SpanContext:       &sc,
+            ParentSpanContext: pscPtr,
+            Attributes:        attributes,
+        },
     }
 }
diff --git a/internal/pkg/instrumentation/bpf/net/http/server/probe_test.go b/internal/pkg/instrumentation/bpf/net/http/server/probe_test.go
index 18aae5371..2d415465e 100644
--- a/internal/pkg/instrumentation/bpf/net/http/server/probe_test.go
+++ b/internal/pkg/instrumentation/bpf/net/http/server/probe_test.go
@@ -75,5 +75,5 @@ func TestProbeConvertEvent(t *testing.T) {
             semconv.NetworkProtocolVersion("1.1"),
         },
     }
-    assert.Equal(t, want, got)
+    assert.Equal(t, want, got[0])
 }
diff --git a/internal/pkg/instrumentation/manager.go b/internal/pkg/instrumentation/manager.go
index d62e97cd0..0166ec936 100644
--- a/internal/pkg/instrumentation/manager.go
+++ b/internal/pkg/instrumentation/manager.go
@@ -25,6 +25,8 @@ import (
     "github.com/go-logr/logr"
 
     dbSql "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/database/sql"
+    kafkaConsumer "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/consumer"
+    kafkaProducer "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/github.com/segmentio/kafka-go/producer"
     otelTraceGlobal "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/go.opentelemetry.io/otel/traceglobal"
     grpcClient "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/google.golang.org/grpc/client"
     grpcServer "go.opentelemetry.io/auto/internal/pkg/instrumentation/bpf/google.golang.org/grpc/server"
@@ -220,6 +222,8 @@ func (m *Manager) registerProbes() error {
         httpServer.New(m.logger),
         httpClient.New(m.logger),
         dbSql.New(m.logger),
+        kafkaProducer.New(m.logger),
+        kafkaConsumer.New(m.logger),
     }
 
     if m.globalImpl {
diff --git a/internal/pkg/instrumentation/probe/event.go b/internal/pkg/instrumentation/probe/event.go
index f027c8eba..e3d4b6e99 100644
--- a/internal/pkg/instrumentation/probe/event.go
+++ b/internal/pkg/instrumentation/probe/event.go
@@ -22,9 +22,9 @@ import (
 
 // Event is a telemetry event that happens within an instrumented package.
 type Event struct {
-    Package string
-    Kind    trace.SpanKind
-    SpanEvent
+    Package    string
+    Kind       trace.SpanKind
+    SpanEvents []*SpanEvent
 }
 
 type Status struct {
diff --git a/internal/pkg/instrumentation/probe/probe.go b/internal/pkg/instrumentation/probe/probe.go
index 5e77599ed..f553dc7ad 100644
--- a/internal/pkg/instrumentation/probe/probe.go
+++ b/internal/pkg/instrumentation/probe/probe.go
@@ -76,7 +76,7 @@ type Base[BPFObj any, BPFEvent any] struct {
     // probe.
     SpecFn func() (*ebpf.CollectionSpec, error)
     // ProcessFn processes probe events into a uniform Event type.
-    ProcessFn func(*BPFEvent) *SpanEvent
+    ProcessFn func(*BPFEvent) []*SpanEvent
 
     reader  *perf.Reader
     closers []io.Closer
@@ -183,16 +183,16 @@ func (i *Base[BPFObj, BPFEvent]) Run(dest chan<- *Event) {
             i.Logger.Error(err, "failed to process perf record")
         }
         e := &Event{
-            Package:   i.ID.InstrumentedPkg,
-            Kind:      i.ID.SpanKind,
-            SpanEvent: *se,
+            Package:    i.ID.InstrumentedPkg,
+            Kind:       i.ID.SpanKind,
+            SpanEvents: se,
         }
         dest <- e
     }
 }
 
-func (i *Base[BPFObj, BPFEvent]) processRecord(record perf.Record) (*SpanEvent, error) {
+func (i *Base[BPFObj, BPFEvent]) processRecord(record perf.Record) ([]*SpanEvent, error) {
     buf := bytes.NewBuffer(record.RawSample)
 
     var event BPFEvent
diff --git a/internal/pkg/instrumentation/utils/ebpf.go b/internal/pkg/instrumentation/utils/ebpf.go
index 345c160a3..dd582222e 100644
--- a/internal/pkg/instrumentation/utils/ebpf.go
+++ b/internal/pkg/instrumentation/utils/ebpf.go
@@ -34,7 +34,7 @@ func LoadEBPFObjects(spec *ebpf.CollectionSpec, to interface{}, opts *ebpf.Colle
     // Getting full verifier log is expensive, so we only do it if the user explicitly asks for it.
     showVerifierLogs := shouldShowVerifierLogs()
     if showVerifierLogs {
-        opts.Programs.LogSize = ebpf.DefaultVerifierLogSize * 100
+        opts.Programs.LogSize = ebpf.DefaultVerifierLogSize * 10000
         opts.Programs.LogLevel = ebpf.LogLevelInstruction | ebpf.LogLevelBranch | ebpf.LogLevelStats
     }
 
diff --git a/internal/pkg/opentelemetry/controller.go b/internal/pkg/opentelemetry/controller.go
index 9b8b89f10..d34dae24f 100644
--- a/internal/pkg/opentelemetry/controller.go
+++ b/internal/pkg/opentelemetry/controller.go
@@ -51,27 +51,29 @@ func (c *Controller) getTracer(pkg string) trace.Tracer {
 
 // Trace creates a trace span for event.
 func (c *Controller) Trace(event *probe.Event) {
-    c.logger.Info("got event", "attrs", event.Attributes, "status", event.Status)
-    ctx := context.Background()
+    for _, se := range event.SpanEvents {
+        c.logger.Info("got event", "kind", event.Kind.String(), "pkg", event.Package, "attrs", se.Attributes)
+        ctx := context.Background()
 
-    if event.SpanContext == nil {
-        c.logger.Info("got event without context - dropping")
-        return
-    }
+        if se.SpanContext == nil {
+            c.logger.Info("got event without context - dropping")
+            return
+        }
 
-    // TODO: handle remote parent
-    if event.ParentSpanContext != nil {
-        ctx = trace.ContextWithSpanContext(ctx, *event.ParentSpanContext)
-    }
+        // TODO: handle remote parent
+        if se.ParentSpanContext != nil {
+            ctx = trace.ContextWithSpanContext(ctx, *se.ParentSpanContext)
+        }
 
-    ctx = ContextWithEBPFEvent(ctx, *event)
-    _, span := c.getTracer(event.Package).
-        Start(ctx, event.SpanName,
-            trace.WithAttributes(event.Attributes...),
-            trace.WithSpanKind(event.Kind),
-            trace.WithTimestamp(c.convertTime(event.StartTime)))
-    span.SetStatus(event.Status.Code, event.Status.Description)
-    span.End(trace.WithTimestamp(c.convertTime(event.EndTime)))
+        ctx = ContextWithEBPFEvent(ctx, *se)
+        _, span := c.getTracer(event.Package).
+            Start(ctx, se.SpanName,
+                trace.WithAttributes(se.Attributes...),
+                trace.WithSpanKind(event.Kind),
+                trace.WithTimestamp(c.convertTime(se.StartTime)))
+        span.SetStatus(se.Status.Code, se.Status.Description)
+        span.End(trace.WithTimestamp(c.convertTime(se.EndTime)))
+    }
 }
 
 func (c *Controller) convertTime(t int64) time.Time {
diff --git a/internal/pkg/opentelemetry/id_generator.go b/internal/pkg/opentelemetry/id_generator.go
index f1f424ec1..18edda214 100644
--- a/internal/pkg/opentelemetry/id_generator.go
+++ b/internal/pkg/opentelemetry/id_generator.go
@@ -31,18 +31,18 @@ func NewEBPFSourceIDGenerator() *EBPFSourceIDGenerator {
 }
 
 // ContextWithEBPFEvent returns a copy of parent in which event is stored.
-func ContextWithEBPFEvent(parent context.Context, event probe.Event) context.Context {
+func ContextWithEBPFEvent(parent context.Context, event probe.SpanEvent) context.Context {
     return context.WithValue(parent, eBPFEventKey{}, event)
 }
 
 // EventFromContext returns the event within ctx if one exists.
-func EventFromContext(ctx context.Context) *probe.Event {
+func EventFromContext(ctx context.Context) *probe.SpanEvent {
     val := ctx.Value(eBPFEventKey{})
     if val == nil {
         return nil
     }
 
-    event, ok := val.(probe.Event)
+    event, ok := val.(probe.SpanEvent)
     if !ok {
         return nil
     }
diff --git a/internal/test/e2e/kafka-go/Dockerfile b/internal/test/e2e/kafka-go/Dockerfile
new file mode 100644
index 000000000..e98d08a3b
--- /dev/null
+++ b/internal/test/e2e/kafka-go/Dockerfile
@@ -0,0 +1,20 @@
+FROM golang:1.22.1 AS builder
+WORKDIR /sample-app
+COPY . .
+RUN go mod init go.opentelemetry.io/auto/internal/test/e2e/kafka-go && go mod tidy && CGO_ENABLED=0 go build -o main
+
+FROM bitnami/kafka:latest
+
+USER root
+ENV KAFKA_CFG_PROCESS_ROLES=controller,broker
+ENV KAFKA_CFG_NODE_ID=0
+ENV KAFKA_CFG_LISTENERS=PLAINTEXT://127.0.0.1:9092,CONTROLLER://:9093
+ENV KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+ENV KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@127.0.0.1:9093
+ENV KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
+WORKDIR /sample-app
+COPY --from=builder /sample-app/main .
+COPY start.sh .
+RUN chmod +x start.sh
+WORKDIR /
+ENTRYPOINT ["/sample-app/start.sh"]
\ No newline at end of file
diff --git a/internal/test/e2e/kafka-go/collector-helm-values.yml b/internal/test/e2e/kafka-go/collector-helm-values.yml
new file mode 100644
index 000000000..fae166cf2
--- /dev/null
+++ b/internal/test/e2e/kafka-go/collector-helm-values.yml
@@ -0,0 +1,57 @@
+mode: "statefulset"
+
+config:
+  receivers:
+    otlp:
+      protocols:
+        http:
+          endpoint: ${env:MY_POD_IP}:4318
+
+  exporters:
+    debug: {}
+    file/trace:
+      path: /tmp/trace.json
+      rotation:
+  processors:
+    batch:
+      # if the batch timeout triggers before the batch fills, the test will fail
+      # due to missing spans; we expect to have 3 spans in the file
+      send_batch_size: 3
+      timeout: 100s
+
+  service:
+    telemetry:
+      logs:
+        level: "debug"
+    pipelines:
+      traces:
+        receivers:
+          - otlp
+        processors:
+          - batch
+        exporters:
+          - file/trace
+          - debug
+
+
+image:
+  repository: otel/opentelemetry-collector-contrib
+  tag: "latest"
+
+command:
+  name: otelcol-contrib
+
+extraVolumes:
+- name: filevolume
+  emptyDir: {}
+extraVolumeMounts:
+- mountPath: /tmp
+  name: filevolume
+
+extraContainers:
+- name: filecp
+  image: busybox
+  command: ["sh", "-c", "sleep 36000"]
+  volumeMounts:
+  - name: filevolume
+    mountPath: /tmp
diff --git a/internal/test/e2e/kafka-go/main.go b/internal/test/e2e/kafka-go/main.go
new file mode 100644
index 000000000..ceacf4144
--- /dev/null
+++ b/internal/test/e2e/kafka-go/main.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    kafka "github.com/segmentio/kafka-go"
+)
+
+func produceMessages(kafkaWriter *kafka.Writer) {
+    msg1 := kafka.Message{
+        Key:   []byte("key1"),
+        Value: []byte("value1"),
+        Topic: "topic1",
+        Headers: []kafka.Header{
+            {
+                Key:   "header1",
+                Value: []byte("value1"),
+            },
+        },
+    }
+    msg2 := kafka.Message{
+        Key:   []byte("key2"),
+        Value: []byte("value2"),
+        Topic: "topic2",
+    }
+    msgs := []kafka.Message{msg1, msg2}
+    err := kafkaWriter.WriteMessages(context.Background(),
+        msgs...,
+    )
+    if err != nil {
+        fmt.Printf("failed to write messages: %v\n", err)
+    }
+}
+
+func getKafkaWriter() *kafka.Writer {
+    return &kafka.Writer{
+        Addr:            kafka.TCP("127.0.0.1:9092"),
+        Balancer:        &kafka.LeastBytes{},
+        Async:           true,
+        RequiredAcks:    1,
+        WriteBackoffMax: 1 * time.Millisecond,
+        BatchTimeout:    1 * time.Millisecond,
+    }
+}
+
+func getKafkaReader() *kafka.Reader {
+    return kafka.NewReader(kafka.ReaderConfig{
+        Brokers:          []string{"127.0.0.1:9092"},
+        GroupID:          "some group id",
+        Topic:            "topic1",
+        ReadBatchTimeout: 1 * time.Millisecond,
+    })
+}
+
+func reader(readChan chan bool) {
+    reader := getKafkaReader()
+
+    defer reader.Close()
+
+    fmt.Println("start consuming ... !!")
+    for {
+        _, err := reader.ReadMessage(context.Background())
+        if err != nil {
+            fmt.Printf("failed to read message: %v\n", err)
+        }
+        readChan <- true
+    }
+}
+
+func main() {
+    kafkaWriter := getKafkaWriter()
+    defer kafkaWriter.Close()
+
+    // to create topics when auto.create.topics.enable='true'
+    fmt.Println("trying to connect to kafka")
+    for range time.Tick(5 * time.Second) {
+        _, err := kafka.DialLeader(context.Background(), "tcp", "127.0.0.1:9092", "topic1", 0)
+        if err == nil {
+            break
+        }
+        fmt.Println("failed to connect to kafka, retrying...")
+    }
+
+    fmt.Println("successfully connected to kafka")
+    _, err := kafka.DialLeader(context.Background(), "tcp", "127.0.0.1:9092", "topic2", 0)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    readChan := make(chan bool)
+    go reader(readChan)
+
+    // give time for auto-instrumentation to start up
+    time.Sleep(5 * time.Second)
+
+    produceMessages(kafkaWriter)
+    <-readChan
+
+    // give time for auto-instrumentation to report signal
+    time.Sleep(5 * time.Second)
+}
diff --git a/internal/test/e2e/kafka-go/start.sh b/internal/test/e2e/kafka-go/start.sh
new file mode 100644
index 000000000..68cd6a292
--- /dev/null
+++ b/internal/test/e2e/kafka-go/start.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# launch kafka and wait for it to be ready
+/opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh &
+
+while ! kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic hc --create --if-not-exists && kafka-topics.sh --bootstrap-server 127.0.0.1:9092 --topic hc --describe; do
+  echo "kafka is not available yet. Retrying in 1 second..."
+  sleep 1
+done
+
+# Run the Go application
+/sample-app/main
\ No newline at end of file
diff --git a/internal/test/e2e/kafka-go/traces.json b/internal/test/e2e/kafka-go/traces.json
new file mode 100644
index 000000000..ca4bf0e58
--- /dev/null
+++ b/internal/test/e2e/kafka-go/traces.json
@@ -0,0 +1,242 @@
+{
+  "resourceSpans": [
+    {
+      "resource": {
+        "attributes": [
+          {
+            "key": "process.runtime.description",
+            "value": {
+              "stringValue": "go version 1.22.2 linux/amd64"
+            }
+          },
+          {
+            "key": "process.runtime.name",
+            "value": {
+              "stringValue": "go"
+            }
+          },
+          {
+            "key": "process.runtime.version",
+            "value": {
+              "stringValue": "1.22.2"
+            }
+          },
+          {
+            "key": "service.name",
+            "value": {
+              "stringValue": "sample-app"
+            }
+          },
+          {
+            "key": "telemetry.auto.version",
+            "value": {
+              "stringValue": "v0.12.0-alpha"
+            }
+          },
+          {
+            "key": "telemetry.sdk.language",
+            "value": {
+              "stringValue": "go"
+            }
+          }
+        ]
+      },
+      "schemaUrl": "https://opentelemetry.io/schemas/1.21.0",
+      "scopeSpans": [
+        {
+          "scope": {
+            "name": "go.opentelemetry.io/auto/github.com/segmentio/kafka-go",
+            "version": "v0.12.0-alpha"
+          },
+          "spans": [
+            {
+              "attributes": [
+                {
+                  "key": "messaging.kafka.message.key",
+                  "value": {
+                    "stringValue": "key1"
+                  }
+                },
+                {
+                  "key": "messaging.destination.name",
+                  "value": {
+                    "stringValue": "topic1"
+                  }
+                },
+                {
+                  "key": "messaging.system",
+                  "value": {
+                    "stringValue": "kafka"
+                  }
+                },
+                {
+                  "key": "messaging.operation",
+                  "value": {
+                    "stringValue": "publish"
+                  }
+                },
+                {
+                  "key": "messaging.batch.message_count",
+                  "value": {
+                    "intValue": "2"
+                  }
+                }
+              ],
+              "kind": 4,
+              "name": "topic1 publish",
+              "parentSpanId": "",
+              "spanId": "xxxxx",
+              "status": {},
+              "traceId": "xxxxx"
+            },
+            {
+              "attributes": [
+                {
+                  "key": "messaging.kafka.message.key",
+                  "value": {
+                    "stringValue": "key2"
+                  }
+                },
+                {
+                  "key": "messaging.destination.name",
+                  "value": {
+                    "stringValue": "topic2"
+                  }
+                },
+                {
+                  "key": "messaging.system",
+                  "value": {
+                    "stringValue": "kafka"
+                  }
+                },
+                {
+                  "key": "messaging.operation",
+                  "value": {
+                    "stringValue": "publish"
+                  }
+                },
+                {
+                  "key": "messaging.batch.message_count",
+                  "value": {
+                    "intValue": "2"
+                  }
+                }
+              ],
+              "kind": 4,
+              "name": "topic2 publish",
+              "parentSpanId": "",
+              "spanId": "xxxxx",
+              "status": {},
+              "traceId": "xxxxx"
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "resource": {
+        "attributes": [
+          {
+            "key": "process.runtime.description",
+            "value": {
+              "stringValue": "go version 1.22.2 linux/amd64"
+            }
+          },
+          {
+            "key": "process.runtime.name",
+            "value": {
+              "stringValue": "go"
+            }
+          },
+          {
+            "key": "process.runtime.version",
+            "value": {
+              "stringValue": "1.22.2"
+            }
+          },
+          {
+            "key": "service.name",
+            "value": {
+              "stringValue": "sample-app"
+            }
+          },
+          {
+            "key": "telemetry.auto.version",
+            "value": {
+              "stringValue": "v0.12.0-alpha"
+            }
+          },
+          {
+            "key": "telemetry.sdk.language",
+            "value": {
+              "stringValue": "go"
+            }
+          }
+        ]
+      },
+      "schemaUrl": "https://opentelemetry.io/schemas/1.21.0",
+      "scopeSpans": [
+        {
+          "scope": {
+            "name": "go.opentelemetry.io/auto/github.com/segmentio/kafka-go",
+            "version": "v0.12.0-alpha"
+          },
+          "spans": [
+            {
+              "attributes": [
+                {
+                  "key": "messaging.system",
+                  "value": {
+                    "stringValue": "kafka"
+                  }
+                },
+                {
+                  "key": "messaging.operation",
+                  "value": {
+                    "stringValue": "receive"
+                  }
+                },
+                {
+                  "key": "messaging.kafka.destination.partition",
+                  "value": {
+                    "intValue": "0"
+                  }
+                },
+                {
+                  "key": "messaging.destination.name",
+                  "value": {
+                    "stringValue": "topic1"
+                  }
+                },
+                {
+                  "key": "messaging.kafka.message.offset",
+                  "value": {
+                    "intValue": "0"
+                  }
+                },
+                {
+                  "key": "messaging.kafka.message.key",
+                  "value": {
+                    "stringValue": "key1"
+                  }
+                },
+                {
+                  "key": "messaging.kafka.consumer.group",
+                  "value": {
+                    "stringValue": "some group id"
+                  }
+                }
+              ],
+              "kind": 5,
+              "name": "topic1 receive",
+              "parentSpanId": "xxxxx",
+              "spanId": "xxxxx",
+              "status": {},
+              "traceId": "xxxxx"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
diff --git a/internal/test/e2e/kafka-go/verify.bats b/internal/test/e2e/kafka-go/verify.bats
new file mode 100644
index 000000000..fda0c5bf8
--- /dev/null
+++ b/internal/test/e2e/kafka-go/verify.bats
@@ -0,0 +1,104 @@
+#!/usr/bin/env bats
+
+load ../../test_helpers/utilities
+
+SCOPE="go.opentelemetry.io/auto/github.com/segmentio/kafka-go"
+
+@test "go-auto :: includes service.name in resource attributes" {
+  result=$(resource_attributes_received | jq "select(.key == \"service.name\").value.stringValue")
+  result_separated=$(echo $result | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"sample-app" "sample-app"'
+}
+
+@test "kafka producer,consumer :: valid {messaging.system} for all spans" {
+  messaging_systems=$(span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.system\").value.stringValue")
+  result_separated=$(echo $messaging_systems | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"kafka" "kafka" "kafka"'
+}
+
+@test "producer :: valid {messaging.destination.name} for all spans" {
+  topics=$(producer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.destination.name\").value.stringValue" | sort)
+  result_separated=$(echo $topics | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"topic1" "topic2"'
+}
+
+@test "consumer :: valid {messaging.destination.name} for all spans" {
+  topics=$(consumer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.destination.name\").value.stringValue" | sort)
+  assert_equal "$topics" '"topic1"'
+}
+
+@test "producer :: valid {messaging.kafka.message.key} for all spans" {
+  keys=$(producer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.kafka.message.key\").value.stringValue" | sort)
+  result_separated=$(echo $keys | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"key1" "key2"'
+}
+
+@test "producer :: valid {messaging.batch.message_count} for all spans" {
+  batch_sizes=$(producer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.batch.message_count\").value.intValue")
+  result_separated=$(echo $batch_sizes | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"2" "2"'
+}
+
+@test "consumer :: valid {messaging.kafka.message.key}" {
+  key=$(consumer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.kafka.message.key\").value.stringValue" | sort)
+  assert_equal "$key" '"key1"'
+}
+
+@test "consumer :: valid {messaging.kafka.destination.partition}" {
+  partition=$(consumer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.kafka.destination.partition\").value.intValue" | sort)
+  assert_equal "$partition" '"0"'
+}
+
+@test "consumer :: valid {messaging.kafka.message.offset}" {
+  offset=$(consumer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.kafka.message.offset\").value.intValue" | sort)
+  assert_equal "$offset" '"0"'
+}
+
+@test "consumer :: valid {messaging.kafka.consumer.group}" {
+  consumer_group=$(consumer_span_attributes_for ${SCOPE} | jq "select(.key == \"messaging.kafka.consumer.group\").value.stringValue" | sort)
+  assert_equal "$consumer_group" '"some group id"'
+}
+
+@test "producer :: valid span names" {
+  span_names=$(producer_spans_from_scope_named ${SCOPE} | jq ".name" | sort)
+  result_separated=$(echo $span_names | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"topic1 publish" "topic2 publish"'
+}
+
+@test "consumer :: valid span names" {
+  span_names=$(consumer_spans_from_scope_named ${SCOPE} | jq ".name")
+  result_separated=$(echo $span_names | sed 's/\n/,/g')
+  assert_equal "$result_separated" '"topic1 receive"'
+}
+
+@test "consumer :: trace ID present and valid in all spans" {
+  trace_id=$(consumer_spans_from_scope_named ${SCOPE} | jq ".traceId")
+  assert_regex "$trace_id" ${MATCH_A_TRACE_ID}
+}
+
+@test "consumer :: span ID present and valid in all spans" {
+  span_id=$(consumer_spans_from_scope_named ${SCOPE} | jq ".spanId")
+  assert_regex "$span_id" ${MATCH_A_SPAN_ID}
+}
+
+@test "consumer :: parent span ID present and valid in all spans" {
+  parent_span_id=$(consumer_spans_from_scope_named ${SCOPE} | jq ".parentSpanId")
+  assert_regex "$parent_span_id" ${MATCH_A_SPAN_ID}
+}
+
+@test "producer, consumer :: spans have same trace ID" {
+  producer_trace_id=$(producer_spans_from_scope_named ${SCOPE} | jq ".traceId" | uniq)
+  consumer_trace_id=$(consumer_spans_from_scope_named ${SCOPE} | jq ".traceId")
+  assert_equal "$producer_trace_id" "$consumer_trace_id"
+}
+
+@test "producer, consumer :: consumer span has producer span as parent" {
+  consumer_parent_span_id=$(consumer_spans_from_scope_named ${SCOPE} | jq ".parentSpanId")
+  producer_span_id=$(producer_spans_from_scope_named ${SCOPE} | jq "select(.name == \"topic1 publish\")" | jq ".spanId")
+  assert_equal "$producer_span_id" "$consumer_parent_span_id"
+}
+
+@test "kafka :: expected (redacted) trace output" {
+  redact_json
+  assert_equal "$(git --no-pager diff ${BATS_TEST_DIRNAME}/traces.json)" ""
+}
diff --git a/internal/test/test_helpers/utilities.bash b/internal/test/test_helpers/utilities.bash
index 399962b1b..00a0f17ab 100644
--- a/internal/test/test_helpers/utilities.bash
+++ b/internal/test/test_helpers/utilities.bash
@@ -42,6 +42,22 @@ client_span_attributes_for() {
     jq ".attributes[]"
 }
 
+# Returns a list of attributes emitted by a given library/scope on producer spans.
+producer_span_attributes_for() {
+  # $1 - library/scope name
+
+  producer_spans_from_scope_named $1 | \
+    jq ".attributes[]"
+}
+
+# Returns a list of attributes emitted by a given library/scope on consumer spans.
+consumer_span_attributes_for() {
+  # $1 - library/scope name
+
+  consumer_spans_from_scope_named $1 | \
+    jq ".attributes[]"
+}
+
 # Returns a list of all resource attributes
 resource_attributes_received() {
   spans_received | jq ".resource.attributes[]?"
@@ -65,6 +81,18 @@ client_spans_from_scope_named() {
   spans_from_scope_named $1 | jq "select(.kind == 3)"
 }
 
+# Returns an array of all producer spans emitted by a given library/scope
+# $1 - library/scope name
+producer_spans_from_scope_named() {
+  spans_from_scope_named $1 | jq "select(.kind == 4)"
+}
+
+# Returns an array of all consumer spans emitted by a given library/scope
+# $1 - library/scope name
+consumer_spans_from_scope_named() {
+  spans_from_scope_named $1 | jq "select(.kind == 5)"
+}
+
 # Returns an array of all spans received
 spans_received() {
   json_output | jq ".resourceSpans[]?"
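For context on the `.kind == 4` and `.kind == 5` selectors used by the new helpers: OTLP JSON encodes span kind as an integer, and those values line up with the `trace.SpanKind` constants from go.opentelemetry.io/otel that the probes set. A minimal sketch (illustration only, not part of the change) that prints the mapping the helpers rely on:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// These integer values are what the bats helpers match in the OTLP JSON:
	// client_spans_from_scope_named   -> .kind == 3
	// producer_spans_from_scope_named -> .kind == 4
	// consumer_spans_from_scope_named -> .kind == 5
	for _, k := range []trace.SpanKind{
		trace.SpanKindClient,
		trace.SpanKindProducer,
		trace.SpanKindConsumer,
	} {
		fmt.Printf("%-10s = kind %d\n", k, int(k))
	}
}
```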
diff --git a/internal/tools/inspect/cmd/offsetgen/main.go b/internal/tools/inspect/cmd/offsetgen/main.go
index c79daf481..93e42b67a 100644
--- a/internal/tools/inspect/cmd/offsetgen/main.go
+++ b/internal/tools/inspect/cmd/offsetgen/main.go
@@ -85,6 +85,11 @@ func manifests() ([]inspect.Manifest, error) {
         return nil, fmt.Errorf("failed to get \"go.opentelemetry.io/otel\" versions: %w", err)
     }
 
+    kafkaGoVers, err := PkgVersions("github.com/segmentio/kafka-go")
+    if err != nil {
+        return nil, fmt.Errorf("failed to get \"github.com/segmentio/kafka-go\" versions: %w", err)
+    }
+
     ren := func(src string) inspect.Renderer {
         return inspect.NewRenderer(logger, src, inspect.DefaultFS)
     }
@@ -154,6 +159,24 @@ func manifests() ([]inspect.Manifest, error) {
                 structfield.NewID("go.opentelemetry.io/otel", "go.opentelemetry.io/otel/internal/global", "tracer", "delegate"),
             },
         },
+        {
+            Application: inspect.Application{
+                Renderer: ren("templates/github.com/segmentio/kafka-go/*.tmpl"),
+                Versions: kafkaGoVers,
+            },
+            StructFields: []structfield.ID{
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Topic"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Partition"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Offset"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Key"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Headers"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Message", "Time"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Writer", "Topic"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Reader", "config"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "ReaderConfig", "GroupID"),
+                structfield.NewID("github.com/segmentio/kafka-go", "github.com/segmentio/kafka-go", "Conn", "clientID"),
+            },
+        },
     }, nil
 }
diff --git a/internal/tools/inspect/render.go b/internal/tools/inspect/render.go
index 2aae44a9d..4981220d3 100644
--- a/internal/tools/inspect/render.go
+++ b/internal/tools/inspect/render.go
@@ -30,6 +30,7 @@ import (
 //go:embed templates/net/http/*.tmpl
 //go:embed templates/runtime/*.tmpl
 //go:embed templates/go.opentelemetry.io/otel/traceglobal/*.tmpl
+//go:embed templates/github.com/segmentio/kafka-go/*.tmpl
 var DefaultFS embed.FS
 
 // Renderer renders templates from an fs.FS.
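The manifest entry above asks offsetgen to record, for every released kafka-go version, where each listed field sits inside its struct; the eBPF uprobes then read those fields by raw byte offset since they cannot use Go's type system. A minimal sketch of the underlying idea, using local stand-in types (the real, per-version layouts come from each release's DWARF data, not from `unsafe.Offsetof`):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Stand-ins for kafka-go's Header and Message; field layouts change between
// releases, which is why the offsets are generated per version rather than
// hard-coded in the probes.
type header struct {
	Key   string
	Value []byte
}

type message struct {
	Topic     string
	Partition int
	Offset    int64
	Key       []byte
	Value     []byte
	Headers   []header
}

func main() {
	var m message
	// A probe reads a field by adding a byte offset like these to the
	// struct's base address in the traced process's memory.
	fmt.Println("Topic offset:    ", unsafe.Offsetof(m.Topic))
	fmt.Println("Partition offset:", unsafe.Offsetof(m.Partition))
	fmt.Println("Offset offset:   ", unsafe.Offsetof(m.Offset))
	fmt.Println("Key offset:      ", unsafe.Offsetof(m.Key))
}
```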
diff --git a/internal/tools/inspect/templates/github.com/segmentio/kafka-go/go.mod.tmpl b/internal/tools/inspect/templates/github.com/segmentio/kafka-go/go.mod.tmpl
new file mode 100644
index 000000000..b8b98ff91
--- /dev/null
+++ b/internal/tools/inspect/templates/github.com/segmentio/kafka-go/go.mod.tmpl
@@ -0,0 +1,5 @@
+module kafkaapp
+
+go 1.12
+
+require github.com/segmentio/kafka-go {{ .Version }}
diff --git a/internal/tools/inspect/templates/github.com/segmentio/kafka-go/main.go.tmpl b/internal/tools/inspect/templates/github.com/segmentio/kafka-go/main.go.tmpl
new file mode 100644
index 000000000..9b6f5e564
--- /dev/null
+++ b/internal/tools/inspect/templates/github.com/segmentio/kafka-go/main.go.tmpl
@@ -0,0 +1,14 @@
+package main
+
+import (
+    kafka "github.com/segmentio/kafka-go"
+)
+
+func main() {
+    kw := kafka.Writer{}
+    kr := kafka.NewReader(kafka.ReaderConfig{})
+    m := kafka.Message{}
+
+    kw.WriteMessages(nil, m)
+    kr.ReadMessage(nil)
+}
diff --git a/versions.yaml b/versions.yaml
index 71d031ce9..8d48a5240 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -22,4 +22,5 @@ excluded-modules:
   - go.opentelemetry.io/auto/examples
   - go.opentelemetry.io/auto/examples/rolldice
   - go.opentelemetry.io/auto/examples/httpPlusdb
+  - go.opentelemetry.io/auto/examples/kafka-go
   - go.opentelemetry.io/auto/internal/tools
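Taken together, the `probe.Event` change from an embedded `SpanEvent` to a `SpanEvents []*SpanEvent` slice is what lets a single batched `WriteMessages` call surface as one span per message, matching the two `publish` spans in traces.json. A simplified sketch of that fan-out, using local stand-in types rather than the real `probe` package (the field names mirror the diff, but this is an illustration, not the package API):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// Stand-ins for probe.SpanEvent and probe.Event as changed in this diff.
type spanEvent struct {
	SpanName   string
	Attributes []attribute.KeyValue
}

type event struct {
	Package    string
	Kind       trace.SpanKind
	SpanEvents []*spanEvent
}

// A batched kafka-go write arrives as one BPF event but fans out to one
// span per message; the controller's new loop then starts and ends a span
// for each element of SpanEvents.
func convertBatch(topics []string) *event {
	e := &event{
		Package: "github.com/segmentio/kafka-go",
		Kind:    trace.SpanKindProducer,
	}
	for _, t := range topics {
		e.SpanEvents = append(e.SpanEvents, &spanEvent{
			SpanName: t + " publish",
			Attributes: []attribute.KeyValue{
				attribute.String("messaging.system", "kafka"),
				attribute.String("messaging.destination.name", t),
				attribute.Int("messaging.batch.message_count", len(topics)),
			},
		})
	}
	return e
}

func main() {
	// Mirrors the e2e sample app: one WriteMessages batch, two topics.
	for _, se := range convertBatch([]string{"topic1", "topic2"}).SpanEvents {
		fmt.Println(se.SpanName) // "topic1 publish", then "topic2 publish"
	}
}
```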