From c068b61e2bde90c7c8c9c901cd5dbb710f085f2b Mon Sep 17 00:00:00 2001
From: Christos Markou
Date: Mon, 7 Oct 2024 08:17:46 +0300
Subject: [PATCH] [extension/k8sobserver] Emit endpoint per Pod's container
 (#35544)

**Description:**

As described in
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35491,
it is useful to give users the option to define `receiver_creator` templates
per container. To that end, this PR introduces a new type of Endpoint called
`PodContainer` that matches the rule type `pod.container`. This Endpoint is
emitted for each container of the Pod, similar to how `Port` Endpoints are
emitted for each container that defines a port.

A complete example of how to use this feature to apply different parsing to
each of the Pod's containers is provided in the `How to test this manually`
section.

**Link to tracking Issue:** Fixes
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35491

**Testing:** TBA

**Documentation:** TBA

### How to test this manually

1. Use the following values file to deploy the Collector's Helm chart:

```yaml
mode: daemonset

image:
  repository: otelcontribcol-dev
  tag: "latest"
  pullPolicy: IfNotPresent

command:
  name: otelcontribcol

clusterRole:
  create: true
  rules:
    - apiGroups:
        - ''
      resources:
        - 'pods'
        - 'nodes'
      verbs:
        - 'get'
        - 'list'
        - 'watch'
    - apiGroups: [ "" ]
      resources: [ "nodes/proxy" ]
      verbs: [ "get" ]
    - apiGroups:
        - ""
      resources:
        - nodes/stats
      verbs:
        - get
    - nonResourceURLs:
        - "/metrics"
      verbs:
        - get

extraVolumeMounts:
  - name: varlogpods
    mountPath: /var/log/pods
    readOnly: true

extraVolumes:
  - name: varlogpods
    hostPath:
      path: /var/log/pods

config:
  extensions:
    k8s_observer:
      auth_type: serviceAccount
      node: ${env:K8S_NODE_NAME}
      observe_nodes: true
  exporters:
    debug:
      verbosity: basic
  receivers:
    receiver_creator/logs:
      watch_observers: [ k8s_observer ]
      receivers:
        filelog/busybox:
          rule: type == "pod.container" && pod.labels["otel.logs"] == "true" && container_name == "busybox"
          config:
            include:
              - /var/log/pods/`pod.namespace`_`pod.name`_`pod.uid`/`container_name`/*.log
            include_file_name: false
            include_file_path: true
            operators:
              - id: container-parser
                type: container
              - type: add
                field: attributes.log.template
                value: busybox
        filelog/lazybox:
          rule: type == "pod.container" && pod.labels["otel.logs"] == "true" && container_name == "lazybox"
          config:
            include:
              - /var/log/pods/`pod.namespace`_`pod.name`_`pod.uid`/`container_name`/*.log
            include_file_name: false
            include_file_path: true
            operators:
              - id: container-parser
                type: container
              - type: add
                field: attributes.log.template
                value: lazybox
  service:
    extensions: [health_check, k8s_observer]
    pipelines:
      logs:
        receivers: [receiver_creator/logs]
        processors: [batch]
        exporters: [debug]
```

2. Follow the logs of the Collector's Pod, for example:
   `kubectl logs -f daemonset-opentelemetry-collector-agent-2hrg5`
3. Deploy a sample Pod that consists of 2 different containers:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-logs
  labels:
    app: daemonset-logs
spec:
  selector:
    matchLabels:
      app.kubernetes.io/component: migration-logger
      otel.logs: "true"
  template:
    metadata:
      labels:
        app.kubernetes.io/component: migration-logger
        otel.logs: "true"
    spec:
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
        - name: lazybox
          image: busybox
          args:
            - /bin/sh
            - -c
            - while true; do echo "otel logs at $(date +%H:%M:%S)" && sleep 0.1s; done
        - name: busybox
          image: busybox
          args:
            - /bin/sh
            - -c
            - while true; do echo "otel logs at $(date +%H:%M:%S)" && sleep 0.1s; done
```

Verify in the logs that only 2 filelog receivers are started, one per container:

```console
2024-10-02T12:05:17.506Z info receivercreator@v0.110.0/observerhandler.go:96 starting receiver {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/lazybox", "endpoint": "10.244.0.13", "endpoint_id": "k8s_observer/01543800-cfea-4c10-8220-387e60f65151/lazybox"}
2024-10-02T12:05:17.508Z info adapter/receiver.go:47 Starting stanza receiver {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/lazybox/receiver_creator/logs{endpoint=\"10.244.0.13\"}/k8s_observer/01543800-cfea-4c10-8220-387e60f65151/lazybox"}
2024-10-02T12:05:17.508Z info receivercreator@v0.110.0/observerhandler.go:96 starting receiver {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/busybox", "endpoint": "10.244.0.13", "endpoint_id": "k8s_observer/01543800-cfea-4c10-8220-387e60f65151/busybox"}
2024-10-02T12:05:17.510Z info adapter/receiver.go:47 Starting stanza receiver {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/busybox/receiver_creator/logs{endpoint=\"10.244.0.13\"}/k8s_observer/01543800-cfea-4c10-8220-387e60f65151/busybox"}
2024-10-02T12:05:17.709Z info fileconsumer/file.go:256 Started watching file {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/lazybox/receiver_creator/logs{endpoint=\"10.244.0.13\"}/k8s_observer/01543800-cfea-4c10-8220-387e60f65151/lazybox", "component": "fileconsumer", "path": "/var/log/pods/default_daemonset-logs-sz4zk_01543800-cfea-4c10-8220-387e60f65151/lazybox/0.log"}
2024-10-02T12:05:17.712Z info fileconsumer/file.go:256 Started watching file {"kind": "receiver", "name": "receiver_creator/logs", "data_type": "logs", "name": "filelog/busybox/receiver_creator/logs{endpoint=\"10.244.0.13\"}/k8s_observer/01543800-cfea-4c10-8220-387e60f65151/busybox", "component": "fileconsumer", "path": "/var/log/pods/default_daemonset-logs-sz4zk_01543800-cfea-4c10-8220-387e60f65151/busybox/0.log"}
```

In addition, verify that the proper attributes are added per container, according to the 2 different filelog receiver definitions:

```console
2024-10-02T12:23:55.117Z info ResourceLog #0
Resource SchemaURL:
Resource attributes:
     -> k8s.pod.name: Str(daemonset-logs-sz4zk)
     -> k8s.container.restart_count: Str(0)
     -> k8s.pod.uid: Str(01543800-cfea-4c10-8220-387e60f65151)
     -> k8s.container.name: Str(lazybox)
     -> k8s.namespace.name: Str(default)
     -> container.id: Str(63a8e69bdc6ee95ee7918baf913a548190f32838adeb0e6189a8210e05157b40)
     -> container.image.name: Str(busybox)
ScopeLogs #0
ScopeLogs SchemaURL:
InstrumentationScope
LogRecord #0
ObservedTimestamp: 2024-10-02 12:23:54.896772888 +0000 UTC
Timestamp: 2024-10-02 12:23:54.750904381 +0000 UTC
SeverityText:
SeverityNumber: Unspecified(0)
Body: Str(otel logs at 12:23:54)
Attributes:
     -> log.iostream: Str(stdout)
     -> logtag: Str(F)
     -> log: Map({"template":"lazybox"})
     -> log.file.path: Str(/var/log/pods/default_daemonset-logs-sz4zk_01543800-cfea-4c10-8220-387e60f65151/lazybox/0.log)
Trace ID:
Span ID:
Flags: 0
ResourceLog #1
Resource SchemaURL:
Resource attributes:
     -> k8s.container.restart_count: Str(0)
     -> k8s.pod.uid: Str(01543800-cfea-4c10-8220-387e60f65151)
     -> k8s.container.name: Str(busybox)
     -> k8s.namespace.name: Str(default)
     -> k8s.pod.name: Str(daemonset-logs-sz4zk)
     -> container.id: Str(47163758424f2bc5382b1e9702301be23cab368b590b5fbf0b30affa09b4a199)
     -> container.image.name: Str(busybox)
ScopeLogs #0
ScopeLogs SchemaURL:
InstrumentationScope
LogRecord #0
ObservedTimestamp: 2024-10-02 12:23:54.897788935 +0000 UTC
Timestamp: 2024-10-02 12:23:54.749885634 +0000 UTC
SeverityText:
SeverityNumber: Unspecified(0)
Body: Str(otel logs at 12:23:54)
Attributes:
     -> log.file.path: Str(/var/log/pods/default_daemonset-logs-sz4zk_01543800-cfea-4c10-8220-387e60f65151/busybox/0.log)
     -> logtag: Str(F)
     -> log.iostream: Str(stdout)
     -> log: Map({"template":"busybox"})
Trace ID:
Span ID:
Flags: 0
```

Signed-off-by: ChrsMark

---
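For reference, the variables that a `pod.container` rule evaluates against come from the new `PodContainer.Env()` method added below. A minimal, illustrative Go sketch of that environment (the values are made up and are not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer"
)

func main() {
	// Illustrative values only; in practice the k8s_observer fills these in
	// from the Pod spec and container status.
	pc := &observer.PodContainer{
		Name:        "busybox",                                                          // container_name
		Image:       "busybox",                                                          // container_image
		ContainerID: "a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7", // container_id
		Pod: observer.Pod{
			Name:      "daemonset-logs-sz4zk",
			Namespace: "default",
			UID:       "01543800-cfea-4c10-8220-387e60f65151",
			Labels:    map[string]string{"otel.logs": "true"},
		},
	}
	// Prints container_name, container_id, container_image and the nested pod
	// environment (pod.name, pod.namespace, pod.uid, pod.labels, ...) that
	// rule expressions such as `pod.labels["otel.logs"] == "true"` reference.
	fmt.Println(pc.Env())
}
```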
 .chloggen/k8sobserver_endpoints.yaml          | 27 ++++++++
 extension/observer/endpoints.go               | 27 ++++++++
 extension/observer/k8sobserver/README.md      |  2 +-
 .../observer/k8sobserver/handler_test.go      | 31 +++++++++-
 .../observer/k8sobserver/k8s_fixtures_test.go |  9 +--
 .../observer/k8sobserver/pod_endpoint.go      | 45 ++++++++++++--
 .../observer/k8sobserver/pod_endpoint_test.go | 12 ++++
 receiver/receivercreator/README.md            | 62 ++++++++++++++++++-
 receiver/receivercreator/config.go            |  2 +-
 receiver/receivercreator/config_test.go       | 15 ++---
 receiver/receivercreator/factory.go           |  8 +++
 receiver/receivercreator/observerhandler.go   |  1 +
 receiver/receivercreator/rules.go             |  2 +-
 receiver/receivercreator/testdata/config.yaml |  2 +
 14 files changed, 223 insertions(+), 22 deletions(-)
 create mode 100644 .chloggen/k8sobserver_endpoints.yaml

diff --git a/.chloggen/k8sobserver_endpoints.yaml b/.chloggen/k8sobserver_endpoints.yaml
new file mode 100644
index 000000000000..597af5644833
--- /dev/null
+++ b/.chloggen/k8sobserver_endpoints.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: k8sobserver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Emit endpoint per Pod's container
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35491]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/extension/observer/endpoints.go b/extension/observer/endpoints.go
index 2a9493bb2f60..cd51a35e036d 100644
--- a/extension/observer/endpoints.go
+++ b/extension/observer/endpoints.go
@@ -24,6 +24,8 @@ const (
 	PortType EndpointType = "port"
 	// PodType is a pod endpoint.
 	PodType EndpointType = "pod"
+	// PodContainerType is a pod's container endpoint.
+	PodContainerType EndpointType = "pod.container"
 	// K8sServiceType is a service endpoint.
 	K8sServiceType EndpointType = "k8s.service"
 	// K8sIngressType is a ingress endpoint.
@@ -218,6 +220,31 @@ func (p *Pod) Type() EndpointType {
 	return PodType
 }
 
+// PodContainer is a discovered k8s pod's container
+type PodContainer struct {
+	// Name of the container
+	Name string
+	// Image of the container
+	Image string
+	// ContainerID is the id of the container exposing the Endpoint
+	ContainerID string
+	// Pod is the k8s pod in which the container is running
+	Pod Pod
+}
+
+func (p *PodContainer) Env() EndpointEnv {
+	return map[string]any{
+		"container_name":  p.Name,
+		"container_id":    p.ContainerID,
+		"container_image": p.Image,
+		"pod":             p.Pod.Env(),
+	}
+}
+
+func (p *PodContainer) Type() EndpointType {
+	return PodContainerType
+}
+
 // Port is an endpoint that has a target as well as a port.
 type Port struct {
 	// Name is the name of the container port.
diff --git a/extension/observer/k8sobserver/README.md b/extension/observer/k8sobserver/README.md
index 29c4e8c9af34..013b7c85b4f8 100644
--- a/extension/observer/k8sobserver/README.md
+++ b/extension/observer/k8sobserver/README.md
@@ -13,7 +13,7 @@
 
 The `k8s_observer` is a [Receiver Creator](../../../receiver/receivercreator/README.md)-compatible "watch observer" that will detect and report
-Kubernetes pod, port, service, ingress and node endpoints via the Kubernetes API.
+Kubernetes pod, port, container, service, ingress and node endpoints via the Kubernetes API.
 
 ## Example Config
diff --git a/extension/observer/k8sobserver/handler_test.go b/extension/observer/k8sobserver/handler_test.go
index 8e813c650a7a..d93275eb2ff3 100644
--- a/extension/observer/k8sobserver/handler_test.go
+++ b/extension/observer/k8sobserver/handler_test.go
@@ -36,7 +36,20 @@ func TestPodEndpointsAdded(t *testing.T) {
 				UID:       "pod-2-UID",
 				Labels:    map[string]string{"env": "prod"},
 			},
-		}, {
+		},
+		{
+			ID:     "test-1/pod-2-UID/container-2",
+			Target: "1.2.3.4",
+			Details: &observer.PodContainer{
+				Name:        "container-2",
+				Image:       "container-image-2",
+				ContainerID: "a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7",
+				Pod: observer.Pod{
+					Name:      "pod-2",
+					Namespace: "default",
+					UID:       "pod-2-UID",
+					Labels:    map[string]string{"env": "prod"}}}},
+		{
 			ID:     "test-1/pod-2-UID/https(443)",
 			Target: "1.2.3.4:443",
 			Details: &observer.Port{
@@ -73,8 +86,8 @@ func TestPodEndpointsChanged(t *testing.T) {
 
 	endpoints := th.ListEndpoints()
 	require.ElementsMatch(t,
-		[]observer.EndpointID{"test-1/pod-2-UID", "test-1/pod-2-UID/https(443)"},
-		[]observer.EndpointID{endpoints[0].ID, endpoints[1].ID},
+		[]observer.EndpointID{"test-1/pod-2-UID", "test-1/pod-2-UID/container-2", "test-1/pod-2-UID/https(443)"},
+		[]observer.EndpointID{endpoints[0].ID, endpoints[1].ID, endpoints[2].ID},
 	)
 
 	// Running state changed, one added and one removed.
@@ -90,6 +103,18 @@ func TestPodEndpointsChanged(t *testing.T) {
 					Namespace: "default",
 					UID:       "pod-2-UID",
 					Labels:    map[string]string{"env": "prod", "updated-label": "true"}}},
+		{
+			ID:     "test-1/pod-2-UID/container-2",
+			Target: "1.2.3.4",
+			Details: &observer.PodContainer{
+				Name:        "container-2",
+				Image:       "container-image-2",
+				ContainerID: "a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7",
+				Pod: observer.Pod{
+					Name:      "pod-2",
+					Namespace: "default",
+					UID:       "pod-2-UID",
+					Labels:    map[string]string{"env": "prod", "updated-label": "true"}}}},
 		{
 			ID:     "test-1/pod-2-UID/https(443)",
 			Target: "1.2.3.4:443",
diff --git a/extension/observer/k8sobserver/k8s_fixtures_test.go b/extension/observer/k8sobserver/k8s_fixtures_test.go
index 887f01a0eef1..9a77c5517941 100644
--- a/extension/observer/k8sobserver/k8s_fixtures_test.go
+++ b/extension/observer/k8sobserver/k8s_fixtures_test.go
@@ -71,7 +71,7 @@ var container1StatusWaiting = v1.ContainerStatus{
 	RestartCount: 1,
 	Image:        "container-image-1",
 	ImageID:      "12345",
-	ContainerID:  "82389",
+	ContainerID:  "containerd://a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7",
 	Started:      nil,
 }
 
@@ -80,9 +80,10 @@ var container2StatusRunning = v1.ContainerStatus{
 	State: v1.ContainerState{
 		Running: &v1.ContainerStateRunning{StartedAt: metav1.Now()},
 	},
-	Ready:   true,
-	Image:   "container-image-1",
-	Started: pointerBool(true),
+	Ready:       true,
+	Image:       "container-image-1",
+	Started:     pointerBool(true),
+	ContainerID: "containerd://a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7",
 }
 
 var podWithNamedPorts = func() *v1.Pod {
diff --git a/extension/observer/k8sobserver/pod_endpoint.go b/extension/observer/k8sobserver/pod_endpoint.go
index e926d29a98eb..56653508d8b2 100644
--- a/extension/observer/k8sobserver/pod_endpoint.go
+++ b/extension/observer/k8sobserver/pod_endpoint.go
@@ -5,6 +5,7 @@ package k8sobserver // import "github.com/open-telemetry/opentelemetry-collector
 
 import (
 	"fmt"
+	"strings"
 
 	v1 "k8s.io/api/core/v1"
 
@@ -38,22 +39,41 @@ func convertPodToEndpoints(idNamespace string, pod *v1.Pod) []observer.Endpoint
 	}}
 
 	// Map of running containers by name.
-	containerRunning := map[string]bool{}
+	runningContainers := map[string]RunningContainer{}
 	for _, container := range pod.Status.ContainerStatuses {
 		if container.State.Running != nil {
-			containerRunning[container.Name] = true
+			runningContainers[container.Name] = containerIDWithRuntime(container)
 		}
 	}
 
 	// Create endpoint for each named container port.
 	for _, container := range pod.Spec.Containers {
-		if !containerRunning[container.Name] {
+		var runningContainer RunningContainer
+		var ok bool
+		if runningContainer, ok = runningContainers[container.Name]; !ok {
 			continue
 		}
+
+		endpointID := observer.EndpointID(
+			fmt.Sprintf(
+				"%s/%s", podID, container.Name,
+			),
+		)
+		endpoints = append(endpoints, observer.Endpoint{
+			ID:     endpointID,
+			Target: podIP,
+			Details: &observer.PodContainer{
+				Name:        container.Name,
+				ContainerID: runningContainer.ID,
+				Image:       container.Image,
+				Pod:         podDetails,
+			},
+		})
+
+		// Create endpoint for each named container port.
 		for _, port := range container.Ports {
-			endpointID := observer.EndpointID(
+			endpointID = observer.EndpointID(
 				fmt.Sprintf(
 					"%s/%s(%d)", podID, port.Name, port.ContainerPort,
 				),
@@ -83,3 +103,20 @@ func getTransport(protocol v1.Protocol) observer.Transport {
 	}
 	return observer.ProtocolUnknown
 }
+
+// containerIDWithRuntime parses the container ID to get the actual ID string
+func containerIDWithRuntime(c v1.ContainerStatus) RunningContainer {
+	cID := c.ContainerID
+	if cID != "" {
+		parts := strings.Split(cID, "://")
+		if len(parts) == 2 {
+			return RunningContainer{parts[1], parts[0]}
+		}
+	}
+	return RunningContainer{}
+}
+
+type RunningContainer struct {
+	ID      string
+	Runtime string
+}
diff --git a/extension/observer/k8sobserver/pod_endpoint_test.go b/extension/observer/k8sobserver/pod_endpoint_test.go
index adf7b0b33353..820832a3f3b9 100644
--- a/extension/observer/k8sobserver/pod_endpoint_test.go
+++ b/extension/observer/k8sobserver/pod_endpoint_test.go
@@ -21,6 +21,18 @@ func TestPodObjectToPortEndpoint(t *testing.T) {
 				Namespace: "default",
 				UID:       "pod-2-UID",
 				Labels:    map[string]string{"env": "prod"}}},
+		{
+			ID:     "namespace/pod-2-UID/container-2",
+			Target: "1.2.3.4",
+			Details: &observer.PodContainer{
+				Name:        "container-2",
+				Image:       "container-image-2",
+				ContainerID: "a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7",
+				Pod: observer.Pod{
+					Name:      "pod-2",
+					Namespace: "default",
+					UID:       "pod-2-UID",
+					Labels:    map[string]string{"env": "prod"}}}},
 		{
 			ID:     "namespace/pod-2-UID/https(443)",
 			Target: "1.2.3.4:443",
diff --git a/receiver/receivercreator/README.md b/receiver/receivercreator/README.md
index 4099660c42ff..d0af39c09a12 100644
--- a/receiver/receivercreator/README.md
+++ b/receiver/receivercreator/README.md
@@ -109,6 +109,18 @@ Note that the backticks below are not typos--they indicate the value is set dynamically.
 | k8s.pod.uid        | \`pod.uid\`       |
 | k8s.namespace.name | \`pod.namespace\` |
 
+`type == "pod.container"`
+
+| Resource Attribute   | Default             |
+|----------------------|---------------------|
+| k8s.pod.name         | \`pod.name\`        |
+| k8s.pod.uid          | \`pod.uid\`         |
+| k8s.namespace.name   | \`pod.namespace\`   |
+| container.name       | \`name\`            |
+| k8s.container.name   | \`container_name\`  |
+| container.image.name | \`container_image\` |
+| container.id         | \`container_id\`    |
+
 `type == "container"`
 
 | Resource Attribute | Default |
@@ -155,7 +167,7 @@ Similar to the per-endpoint type `resource_attributes` described above but for i
 
 ## Rule Expressions
 
-Each rule must start with `type == ("pod"|"port"|"hostport"|"container"|"k8s.service"|"k8s.node"|"k8s.ingress") &&` such that the rule matches
+Each rule must start with `type == ("pod"|"port"|"pod.container"|"hostport"|"container"|"k8s.service"|"k8s.node"|"k8s.ingress") &&` such that the rule matches
 only one endpoint type. Depending on the type of endpoint the rule is
 targeting it will have different variables available.
@@ -186,6 +198,21 @@
 | pod.labels      | map of labels of the owning pod      | Map with String key and value |
 | pod.annotations | map of annotations of the owning pod | Map with String key and value |
 
+### Pod Container
+
+| Variable        | Description                          | Data Type                     |
+|-----------------|--------------------------------------|-------------------------------|
+| type            | `"pod.container"`                    | String                        |
+| id              | ID of source endpoint                | String                        |
+| container_name  | container name                       | String                        |
+| container_id    | container id                         | String                        |
+| container_image | container image                      | String                        |
+| pod.name        | name of the owning pod               | String                        |
+| pod.namespace   | namespace of the pod                 | String                        |
+| pod.uid         | unique id of the pod                 | String                        |
+| pod.labels      | map of labels of the owning pod      | Map with String key and value |
+| pod.annotations | map of annotations of the owning pod | Map with String key and value |
+
 ### Host Port
 
 | Variable | Description | Data Type |
@@ -359,6 +386,35 @@ receivers:
           - endpoint: '`scheme`://`endpoint`:`port``"prometheus.io/path" in annotations ? annotations["prometheus.io/path"] : "/health"`'
             method: GET
             collection_interval: 10s
+  receiver_creator/logs:
+    watch_observers: [ k8s_observer ]
+    receivers:
+      filelog/busybox:
+        rule: type == "pod.container" && container_name == "busybox"
+        config:
+          include:
+            - /var/log/pods/`pod.namespace`_`pod.name`_`pod.uid`/`container_name`/*.log
+          include_file_name: false
+          include_file_path: true
+          operators:
+            - id: container-parser
+              type: container
+            - type: add
+              field: attributes.log.template
+              value: busybox
+      filelog/lazybox:
+        rule: type == "pod.container" && container_name == "lazybox"
+        config:
+          include:
+            - /var/log/pods/`pod.namespace`_`pod.name`_`pod.uid`/`container_name`/*.log
+          include_file_name: false
+          include_file_path: true
+          operators:
+            - id: container-parser
+              type: container
+            - type: add
+              field: attributes.log.template
+              value: lazybox
 
 processors:
   exampleprocessor:
@@ -372,6 +428,10 @@ service:
       receivers: [receiver_creator/1, receiver_creator/2, receiver_creator/3, receiver_creator/4]
       processors: [exampleprocessor]
       exporters: [exampleexporter]
+    logs:
+      receivers: [receiver_creator/logs]
+      processors: [exampleprocessor]
+      exporters: [exampleexporter]
   extensions: [k8s_observer, host_observer]
 ```
diff --git a/receiver/receivercreator/config.go b/receiver/receivercreator/config.go
index c12109946910..bb5ebfaa4f6f 100644
--- a/receiver/receivercreator/config.go
+++ b/receiver/receivercreator/config.go
@@ -92,7 +92,7 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error {
 
 	for endpointType := range cfg.ResourceAttributes {
 		switch endpointType {
-		case observer.ContainerType, observer.K8sServiceType, observer.K8sIngressType, observer.HostPortType, observer.K8sNodeType, observer.PodType, observer.PortType:
+		case observer.ContainerType, observer.K8sServiceType, observer.K8sIngressType, observer.HostPortType, observer.K8sNodeType, observer.PodType, observer.PortType, observer.PodContainerType:
 		default:
 			return fmt.Errorf("resource attributes for unsupported endpoint type %q", endpointType)
 		}
diff --git a/receiver/receivercreator/config_test.go b/receiver/receivercreator/config_test.go
index d973a363c4cd..f9f9eb85ae6f 100644
--- a/receiver/receivercreator/config_test.go
+++ b/receiver/receivercreator/config_test.go
@@ -109,13 +109,14 @@ func TestLoadConfig(t *testing.T) {
 					component.MustNewIDWithName("mock_observer", "with_name"),
 				},
 				ResourceAttributes: map[observer.EndpointType]map[string]string{
-					observer.ContainerType:  {"container.key": "container.value"},
-					observer.PodType:        {"pod.key": "pod.value"},
-					observer.PortType:       {"port.key": "port.value"},
-					observer.HostPortType:   {"hostport.key": "hostport.value"},
-					observer.K8sServiceType: {"k8s.service.key": "k8s.service.value"},
-					observer.K8sIngressType: {"k8s.ingress.key": "k8s.ingress.value"},
-					observer.K8sNodeType:    {"k8s.node.key": "k8s.node.value"},
+					observer.ContainerType:    {"container.key": "container.value"},
+					observer.PodType:          {"pod.key": "pod.value"},
+					observer.PodContainerType: {"pod.container.key": "pod.container.value"},
+					observer.PortType:         {"port.key": "port.value"},
+					observer.HostPortType:     {"hostport.key": "hostport.value"},
+					observer.K8sServiceType:   {"k8s.service.key": "k8s.service.value"},
+					observer.K8sIngressType:   {"k8s.ingress.key": "k8s.ingress.value"},
+					observer.K8sNodeType:      {"k8s.node.key": "k8s.node.value"},
 				},
 			},
 		},
diff --git a/receiver/receivercreator/factory.go b/receiver/receivercreator/factory.go
index abba7f8139fe..9bcfc98a6dff 100644
--- a/receiver/receivercreator/factory.go
+++ b/receiver/receivercreator/factory.go
@@ -50,6 +50,14 @@ func createDefaultConfig() component.Config {
 				conventions.AttributeK8SPodUID:        "`pod.uid`",
 				conventions.AttributeK8SNamespaceName: "`pod.namespace`",
 			},
+			observer.PodContainerType: map[string]string{
+				conventions.AttributeK8SPodName:         "`pod.name`",
+				conventions.AttributeK8SPodUID:          "`pod.uid`",
+				conventions.AttributeK8SNamespaceName:   "`pod.namespace`",
+				conventions.AttributeK8SContainerName:   "`container_name`",
+				conventions.AttributeContainerID:        "`container_id`",
+				conventions.AttributeContainerImageName: "`container_image`",
+			},
 			observer.ContainerType: map[string]string{
 				conventions.AttributeContainerName:      "`name`",
 				conventions.AttributeContainerImageName: "`image`",
diff --git a/receiver/receivercreator/observerhandler.go b/receiver/receivercreator/observerhandler.go
index e85f385850f9..de1d9689953b 100644
--- a/receiver/receivercreator/observerhandler.go
+++ b/receiver/receivercreator/observerhandler.go
@@ -103,6 +103,7 @@ func (obs *observerHandler) OnAdd(added []observer.Endpoint) {
 				obs.params.TelemetrySettings.Logger.Error("unable to resolve template config", zap.String("receiver", template.id.String()), zap.Error(err))
 				continue
 			}
+			obs.params.TelemetrySettings.Logger.Debug("resolved config", zap.String("receiver", template.id.String()), zap.Any("config", resolvedConfig))
 
 			discoveredCfg := userConfigMap{}
 			// If user didn't set endpoint set to default value as well as
diff --git a/receiver/receivercreator/rules.go b/receiver/receivercreator/rules.go
index 422e7813ff1b..5c397ad84070 100644
--- a/receiver/receivercreator/rules.go
+++ b/receiver/receivercreator/rules.go
@@ -22,7 +22,7 @@ type rule struct {
 
 // ruleRe is used to verify the rule starts type check.
 var ruleRe = regexp.MustCompile(
-	fmt.Sprintf(`^type\s*==\s*(%q|%q|%q|%q|%q|%q|%q)`, observer.PodType, observer.K8sServiceType, observer.K8sIngressType, observer.PortType, observer.HostPortType, observer.ContainerType, observer.K8sNodeType),
+	fmt.Sprintf(`^type\s*==\s*(%q|%q|%q|%q|%q|%q|%q|%q)`, observer.PodType, observer.K8sServiceType, observer.K8sIngressType, observer.PortType, observer.PodContainerType, observer.HostPortType, observer.ContainerType, observer.K8sNodeType),
 )
 
 // newRule creates a new rule instance.
diff --git a/receiver/receivercreator/testdata/config.yaml b/receiver/receivercreator/testdata/config.yaml
index 040e0e04d92f..ea7af84009e9 100644
--- a/receiver/receivercreator/testdata/config.yaml
+++ b/receiver/receivercreator/testdata/config.yaml
@@ -21,6 +21,8 @@ receiver_creator/1:
       container.key: container.value
     pod:
       pod.key: pod.value
+    pod.container:
+      pod.container.key: pod.container.value
     port:
       port.key: port.value
    hostport:
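
As a quick, self-contained illustration (a sketch only, not part of the patch): the kubelet reports container IDs as `<runtime>://<id>`, and `containerIDWithRuntime` in `pod_endpoint.go` splits off the runtime prefix so that rule expressions see only the bare ID as `container_id`.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same parsing as containerIDWithRuntime: split "<runtime>://<id>" into its two parts.
	cID := "containerd://a808232bb4a57d421bb16f20dc9ab2a441343cb0aae8c369dc375838c7a49fd7"
	if parts := strings.Split(cID, "://"); len(parts) == 2 {
		fmt.Println("runtime:", parts[0]) // containerd
		fmt.Println("id:", parts[1])      // a808232b...
	}
}
```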