apiVersion: v1
kind: ConfigMap
metadata:
  name: agent-kubernetes-input
  namespace: kube-system
  labels:
    k8s-app: elastic-agent-standalone
data:
  # kubernetes.yml: |-
  #   inputs:
  #     - name: kubernetes-cluster-metrics
  #       condition: ${kubernetes_leaderelection.leader} == true
  #       type: kubernetes/metrics
  #       use_output: default
  #       meta:
  #         package:
  #           name: kubernetes
  #           version: 1.9.0
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: kubernetes.apiserver
  #             type: metrics
  #           metricsets:
  #             - apiserver
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.KUBERNETES_SERVICE_HOST}:${env.KUBERNETES_SERVICE_PORT}'
  #           period: 30s
  #           ssl.certificate_authorities:
  #             - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  #         - data_stream:
  #             dataset: kubernetes.event
  #             type: metrics
  #           metricsets:
  #             - event
  #           period: 10s
  #           add_metadata: true
  #         - data_stream:
  #             dataset: kubernetes.state_container
  #             type: metrics
  #           metricsets:
  #             - state_container
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #           # If `https` is used to access `kube-state-metrics`, add the following to all `kubernetes.state_*` datasets:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
  #         - data_stream:
  #             dataset: kubernetes.state_cronjob
  #             type: metrics
  #           metricsets:
  #             - state_cronjob
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_daemonset
  #             type: metrics
  #           metricsets:
  #             - state_daemonset
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_deployment
  #             type: metrics
  #           metricsets:
  #             - state_deployment
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_job
  #             type: metrics
  #           metricsets:
  #             - state_job
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_node
  #             type: metrics
  #           metricsets:
  #             - state_node
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_persistentvolume
  #             type: metrics
  #           metricsets:
  #             - state_persistentvolume
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_persistentvolumeclaim
  #             type: metrics
  #           metricsets:
  #             - state_persistentvolumeclaim
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_pod
  #             type: metrics
  #           metricsets:
  #             - state_pod
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_replicaset
  #             type: metrics
  #           metricsets:
  #             - state_replicaset
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_resourcequota
  #             type: metrics
  #           metricsets:
  #             - state_resourcequota
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_service
  #             type: metrics
  #           metricsets:
  #             - state_service
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_statefulset
  #             type: metrics
  #           metricsets:
  #             - state_statefulset
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.state_storageclass
  #             type: metrics
  #           metricsets:
  #             - state_storageclass
  #           add_metadata: true
  #           hosts:
  #             - 'kube-state-metrics:8080'
  #           period: 10s
  #     - name: system-logs
  #       type: logfile
  #       use_output: default
  #       meta:
  #         package:
  #           name: system
  #           version: 0.10.7
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: system.auth
  #             type: logs
  #           paths:
  #             - /var/log/auth.log*
  #             - /var/log/secure*
  #           exclude_files:
  #             - .gz$
  #           multiline:
  #             pattern: ^\s
  #             match: after
  #           processors:
  #             - add_fields:
  #                 target: ''
  #                 fields:
  #                   ecs.version: 1.12.0
  #         - data_stream:
  #             dataset: system.syslog
  #             type: logs
  #           paths:
  #             - /var/log/messages*
  #             - /var/log/syslog*
  #           exclude_files:
  #             - .gz$
  #           multiline:
  #             pattern: ^\s
  #             match: after
  #           processors:
  #             - add_fields:
  #                 target: ''
  #                 fields:
  #                   ecs.version: 1.12.0
  #     - name: container-log
  #       type: filestream
  #       use_output: default
  #       meta:
  #         package:
  #           name: kubernetes
  #           version: 1.9.0
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: kubernetes.container_logs
  #             type: logs
  #           prospector.scanner.symlinks: true
  #           parsers:
  #             - container: ~
  #             # - ndjson:
  #             #     target: json
  #             # - multiline:
  #             #     type: pattern
  #             #     pattern: '^\['
  #             #     negate: true
  #             #     match: after
  #           paths:
  #             - /var/log/containers/*${kubernetes.container.id}.log
  #     - name: audit-log
  #       type: filestream
  #       use_output: default
  #       meta:
  #         package:
  #           name: kubernetes
  #           version: 1.9.0
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: kubernetes.audit_logs
  #             type: logs
  #           exclude_files:
  #             - .gz$
  #           parsers:
  #             - ndjson:
  #                 add_error_key: true
  #                 target: kubernetes_audit
  #           paths:
  #             - /var/log/kubernetes/kube-apiserver-audit.log
  #             # The default path of audit logs on Openshift:
  #             # - /var/log/kube-apiserver/audit.log
  #           processors:
  #             - rename:
  #                 fields:
  #                   - from: kubernetes_audit
  #                     to: kubernetes.audit
  #             - script:
  #                 id: dedot_annotations
  #                 lang: javascript
  #                 source: |
  #                   function process(event) {
  #                     var audit = event.Get("kubernetes.audit");
  #                     for (var annotation in audit["annotations"]) {
  #                       var annotation_dedoted = annotation.replace(/\./g,'_')
  #                       event.Rename("kubernetes.audit.annotations."+annotation, "kubernetes.audit.annotations."+annotation_dedoted)
  #                     }
  #                     return event;
  #                   } function test() {
  #                     var event = process(new Event({ "kubernetes": { "audit": { "annotations": { "authorization.k8s.io/decision": "allow", "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"system:kube-scheduler\" of ClusterRole \"system:kube-scheduler\" to User \"system:kube-scheduler\"" } } } }));
  #                     if (event.Get("kubernetes.audit.annotations.authorization_k8s_io/decision") !== "allow") {
  #                       throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow";
  #                     }
  #                   }
  #     - name: system-metrics
  #       type: system/metrics
  #       use_output: default
  #       meta:
  #         package:
  #           name: system
  #           version: 0.10.9
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: system.core
  #             type: metrics
  #           metricsets:
  #             - core
  #           core.metrics:
  #             - percentages
  #         - data_stream:
  #             dataset: system.cpu
  #             type: metrics
  #           period: 10s
  #           cpu.metrics:
  #             - percentages
  #             - normalized_percentages
  #           metricsets:
  #             - cpu
  #         - data_stream:
  #             dataset: system.diskio
  #             type: metrics
  #           period: 10s
  #           diskio.include_devices: null
  #           metricsets:
  #             - diskio
  #         - data_stream:
  #             dataset: system.filesystem
  #             type: metrics
  #           period: 1m
  #           metricsets:
  #             - filesystem
  #           processors:
  #             - drop_event.when.regexp:
  #                 system.filesystem.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)
  #         - data_stream:
  #             dataset: system.fsstat
  #             type: metrics
  #           period: 1m
  #           metricsets:
  #             - fsstat
  #           processors:
  #             - drop_event.when.regexp:
  #                 system.fsstat.mount_point: ^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)
  #         - data_stream:
  #             dataset: system.load
  #             type: metrics
  #           period: 10s
  #           metricsets:
  #             - load
  #         - data_stream:
  #             dataset: system.memory
  #             type: metrics
  #           period: 10s
  #           metricsets:
  #             - memory
  #         - data_stream:
  #             dataset: system.network
  #             type: metrics
  #           period: 10s
  #           network.interfaces: null
  #           metricsets:
  #             - network
  #         - data_stream:
  #             dataset: system.process
  #             type: metrics
  #           process.include_top_n.by_memory: 5
  #           period: 10s
  #           processes:
  #             - .*
  #           process.include_top_n.by_cpu: 5
  #           process.cgroups.enabled: false
  #           process.cmdline.cache.enabled: true
  #           metricsets:
  #             - process
  #           process.include_cpu_ticks: false
  #           system.hostfs: /hostfs
  #         - data_stream:
  #             dataset: system.process_summary
  #             type: metrics
  #           period: 10s
  #           metricsets:
  #             - process_summary
  #           system.hostfs: /hostfs
  #         - data_stream:
  #             dataset: system.socket_summary
  #             type: metrics
  #           period: 10s
  #           metricsets:
  #             - socket_summary
  #           system.hostfs: /hostfs
  #     - name: kubernetes-node-metrics
  #       type: kubernetes/metrics
  #       use_output: default
  #       meta:
  #         package:
  #           name: kubernetes
  #           version: 1.9.0
  #       data_stream:
  #         namespace: default
  #       streams:
  #         - data_stream:
  #             dataset: kubernetes.controllermanager
  #             type: metrics
  #           metricsets:
  #             - controllermanager
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${kubernetes.pod.ip}:10257'
  #           period: 10s
  #           ssl.verification_mode: none
  #           condition: ${kubernetes.labels.component} == 'kube-controller-manager'
  #           # Openshift:
  #           # condition: ${kubernetes.labels.app} == 'kube-controller-manager'
  #         - data_stream:
  #             dataset: kubernetes.scheduler
  #             type: metrics
  #           metricsets:
  #             - scheduler
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${kubernetes.pod.ip}:10259'
  #           period: 10s
  #           ssl.verification_mode: none
  #           condition: ${kubernetes.labels.component} == 'kube-scheduler'
  #           # Openshift:
  #           # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler'
  #         - data_stream:
  #             dataset: kubernetes.proxy
  #             type: metrics
  #           metricsets:
  #             - proxy
  #           hosts:
  #             - 'localhost:10249'
  #             # Openshift:
  #             # - 'localhost:29101'
  #           period: 10s
  #         - data_stream:
  #             dataset: kubernetes.container
  #             type: metrics
  #           metricsets:
  #             - container
  #           add_metadata: true
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.NODE_NAME}:10250'
  #           period: 10s
  #           ssl.verification_mode: none
  #           # On Openshift, the ssl configuration must be replaced with:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /path/to/ca-bundle.crt
  #         - data_stream:
  #             dataset: kubernetes.node
  #             type: metrics
  #           metricsets:
  #             - node
  #           add_metadata: true
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.NODE_NAME}:10250'
  #           period: 10s
  #           ssl.verification_mode: none
  #           # On Openshift, the ssl configuration must be replaced with:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /path/to/ca-bundle.crt
  #         - data_stream:
  #             dataset: kubernetes.pod
  #             type: metrics
  #           metricsets:
  #             - pod
  #           add_metadata: true
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.NODE_NAME}:10250'
  #           period: 10s
  #           ssl.verification_mode: none
  #           # On Openshift, the ssl configuration must be replaced with:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /path/to/ca-bundle.crt
  #         - data_stream:
  #             dataset: kubernetes.system
  #             type: metrics
  #           metricsets:
  #             - system
  #           add_metadata: true
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.NODE_NAME}:10250'
  #           period: 10s
  #           ssl.verification_mode: none
  #           # On Openshift, the ssl configuration must be replaced with:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /path/to/ca-bundle.crt
  #         - data_stream:
  #             dataset: kubernetes.volume
  #             type: metrics
  #           metricsets:
  #             - volume
  #           add_metadata: true
  #           bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           hosts:
  #             - 'https://${env.NODE_NAME}:10250'
  #           period: 10s
  #           ssl.verification_mode: none
  #           # On Openshift, the ssl configuration must be replaced with:
  #           # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  #           # ssl.certificate_authorities:
  #           #   - /path/to/ca-bundle.crt
  #     # Add extra input blocks here, based on conditions,
  #     # so that targeted Pods are automatically identified and monitored
  #     # with a predefined integration. For instance:
  #     #- name: redis
  #     #  type: redis/metrics
  #     #  use_output: default
  #     #  meta:
  #     #    package:
  #     #      name: redis
  #     #      version: 0.3.6
  #     #  data_stream:
  #     #    namespace: default
  #     #  streams:
  #     #    - data_stream:
  #     #        dataset: redis.info
  #     #        type: metrics
  #     #      metricsets:
  #     #        - info
  #     #      hosts:
  #     #        - '${kubernetes.pod.ip}:6379'
  #     #      idle_timeout: 20s
  #     #      maxconn: 10
  #     #      network: tcp
  #     #      period: 10s
  #     #      condition: ${kubernetes.labels.app} == 'redis'
  nats.yml: |-
    inputs:
      - name: filestream-nats
        type: filestream
        use_output: default
        streams:
          - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.log
              type: logs
            exclude_files:
              - .gz$
            parsers:
              - container:
                  format: auto
                  stream: ${kubernetes.hints.nats.log.stream|all}
            paths:
              - /var/log/containers/*${kubernetes.hints.container_id}.log
            processors: ${kubernetes.hints.nats.log.processors}
            prospector:
              scanner:
                symlinks: true
            tags:
              - nats-log
        data_stream.namespace: default
      - name: nats/metrics-nats
        type: nats/metrics
        use_output: default
        streams:
          - condition: ${kubernetes.hints.nats.connection.enabled} == true and ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.connection
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.connection.host|localhost:8222}
            metricsets:
              - connection
            period: ${kubernetes.hints.nats.connection.period|10s}
            processors: ${kubernetes.hints.nats.connection.processors}
          - condition: ${kubernetes.hints.nats.connections.enabled} == true or ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.connections
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.connections.host|localhost:8222}
            metricsets:
              - connections
            period: ${kubernetes.hints.nats.connections.period|10s}
            processors: ${kubernetes.hints.nats.connections.processors}
          - condition: ${kubernetes.hints.nats.route.enabled} == true and ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.route
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.route.host|localhost:8222}
            metricsets:
              - route
            period: ${kubernetes.hints.nats.route.period|10s}
            processors: ${kubernetes.hints.nats.route.processors}
          - condition: ${kubernetes.hints.nats.routes.enabled} == true or ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.routes
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.routes.host|localhost:8222}
            metricsets:
              - routes
            period: ${kubernetes.hints.nats.routes.period|10s}
            processors: ${kubernetes.hints.nats.routes.processors}
          - condition: ${kubernetes.hints.nats.stats.enabled} == true or ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.stats
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.stats.host|localhost:8222}
            metricsets:
              - stats
            period: ${kubernetes.hints.nats.stats.period|10s}
            processors: ${kubernetes.hints.nats.stats.processors}
          - condition: ${kubernetes.hints.nats.subscriptions.enabled} == true or ${kubernetes.hints.nats.enabled} == true
            data_stream:
              dataset: nats.subscriptions
              type: metrics
            hosts:
              - ${kubernetes.hints.nats.subscriptions.host|localhost:8222}
            metricsets:
              - subscriptions
            period: ${kubernetes.hints.nats.subscriptions.period|10s}
            processors: ${kubernetes.hints.nats.subscriptions.processors}
        data_stream.namespace: default
  redis2.yml: |
    inputs:
      - name: filestream-redis
        type: filestream
        use_output: default
        streams:
          - condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true
            data_stream:
              dataset: redis.log
              type: logs
            exclude_files:
              - .gz$
            exclude_lines:
              - ^\s+[\-`('.|_]
            parsers:
              - container:
                  format: auto
                  stream: ${kubernetes.hints.redis.log.stream|all}
            paths:
              - /var/log/containers/*${kubernetes.hints.container_id}.log
            processors: ${kubernetes.hints.redis.log.processors}
            prospector:
              scanner:
                symlinks: true
            tags:
              - redis-log
        data_stream.namespace: default
      - name: redis-redis
        type: redis
        use_output: default
        streams:
          - condition: ${kubernetes.hints.redis.slowlog.enabled} == true or ${kubernetes.hints.redis.enabled} == true
            data_stream:
              dataset: redis.slowlog
              type: logs
            hosts:
              - ${kubernetes.hints.redis.slowlog.host|127.0.0.1:6379}
            password: ${kubernetes.hints.redis.slowlog.password|}
            processors: ${kubernetes.hints.redis.slowlog.processors}
        data_stream.namespace: default
      - name: redis/metrics-redis
        type: redis/metrics
        use_output: default
        streams:
          - condition: ${kubernetes.hints.redis.info.enabled} == true or ${kubernetes.hints.redis.enabled} == true
            data_stream:
              dataset: redis.info
              type: metrics
            hosts:
              - ${kubernetes.hints.redis.info.host|127.0.0.1:6379}
            idle_timeout: 20s
            maxconn: 10
            metricsets:
              - info
            network: tcp
            password: ${kubernetes.hints.redis.info.password|}
            period: ${kubernetes.hints.redis.info.period|10s}
            processors: ${kubernetes.hints.redis.info.processors}
          - condition: ${kubernetes.hints.redis.key.enabled} == true or ${kubernetes.hints.redis.enabled} == true
            data_stream:
              dataset: redis.key
              type: metrics
            hosts:
              - ${kubernetes.hints.redis.key.host|127.0.0.1:6379}
            idle_timeout: 20s
            maxconn: 10
            metricsets:
              - key
            network: tcp
            password: ${kubernetes.hints.redis.key.password|}
            period: ${kubernetes.hints.redis.key.period|10s}
            processors: ${kubernetes.hints.redis.key.processors}
          - condition: ${kubernetes.hints.redis.keyspace.enabled} == true or ${kubernetes.hints.redis.enabled} == true
            data_stream:
              dataset: redis.keyspace
              type: metrics
            hosts:
              - ${kubernetes.hints.redis.keyspace.host|127.0.0.1:6379}
            idle_timeout: 20s
            maxconn: 10
            metricsets:
              - keyspace
            network: tcp
            password: ${kubernetes.hints.redis.keyspace.password|}
            period: ${kubernetes.hints.redis.keyspace.period|10s}
            processors: ${kubernetes.hints.redis.keyspace.processors}
        data_stream.namespace: default
  redis.yml: |-
    inputs:
      - name: redis
        type: redis/metrics
        use_output: default
        meta:
          package:
            name: redis
            version: 0.3.6
        data_stream:
          namespace: default
        streams:
          - data_stream:
              dataset: redis.info
              type: metrics
            metricsets:
              - info
              - key
            hosts:
              - '${kubernetes.pod.ip}:6379'
            idle_timeout: 20s
            maxconn: 10
            network: tcp
            period: 10s
            condition: ${kubernetes.labels.app} == 'redis'
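  # The nats.yml, redis2.yml, and redis.yml entries above are hints-based autodiscover
  # templates: each stream starts only when its ${kubernetes.hints.*} condition evaluates
  # to true for a discovered Pod. As a minimal illustrative sketch (not part of this
  # manifest), a Pod annotated as below should satisfy the redis conditions, assuming
  # hints are enabled in the agent policy (providers.kubernetes.hints.enabled: true)
  # and that the co.elastic.hints/* annotation keys apply to your agent version:
  #
  # apiVersion: v1
  # kind: Pod
  # metadata:
  #   name: redis                                # hypothetical Pod name
  #   annotations:
  #     co.elastic.hints/package: redis          # sets ${kubernetes.hints.redis.enabled} to true
  #     co.elastic.hints/data_streams: info      # restrict collection to the info data stream
  #     co.elastic.hints/info.period: 5m         # overrides the ${kubernetes.hints.redis.info.period|10s} default
  # spec:
  #   containers:
  #     - name: redis
  #       image: redis:6.2                       # hypothetical image tag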