diff --git a/.yamllint.yml b/.yamllint.yml index 8e2133ae4..f50c93057 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -10,6 +10,8 @@ ignore: - node_modules - charts/k8s-monitoring/vendir.lock.yml - charts/k8s-monitoring/docs/examples/**/output.yaml + - charts/k8s-monitoring/tests/integration/**/output.yaml + - charts/k8s-monitoring/tests/platform/**/output.yaml - charts/k8s-monitoring-v1/docs/examples/**/output.yaml - charts/**/templates diff --git a/charts/feature-node-logs/.helmignore b/charts/feature-node-logs/.helmignore new file mode 100644 index 000000000..2b29eaf56 --- /dev/null +++ b/charts/feature-node-logs/.helmignore @@ -0,0 +1,6 @@ +docs +schema-mods +tests +Makefile +README.md +README.md.gotmpl diff --git a/charts/feature-node-logs/Chart.lock b/charts/feature-node-logs/Chart.lock new file mode 100644 index 000000000..cc833191e --- /dev/null +++ b/charts/feature-node-logs/Chart.lock @@ -0,0 +1,3 @@ +dependencies: [] +digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726 +generated: "2024-08-28T15:09:37.347011-05:00" diff --git a/charts/feature-node-logs/Chart.yaml b/charts/feature-node-logs/Chart.yaml new file mode 100644 index 000000000..555cbb073 --- /dev/null +++ b/charts/feature-node-logs/Chart.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v2 +name: k8s-monitoring-feature-node-logs +description: Kubernetes Observability feature for gathering Cluster Node logs. 
+type: application +sources: + - https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-node-logs +version: 1.0.0 +appVersion: 1.0.0 +maintainers: + - email: pete.wall@grafana.com + name: petewall +dependencies: [] diff --git a/charts/feature-node-logs/Makefile b/charts/feature-node-logs/Makefile new file mode 100644 index 000000000..931471179 --- /dev/null +++ b/charts/feature-node-logs/Makefile @@ -0,0 +1,38 @@ +HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) +UPDATECLI_FILES := $(shell yq -e '.dependencies[] | select(.repository == "http*") | ".updatecli-" + .name + ".yaml"' Chart.yaml 2>/dev/null | sort | uniq) + +.SECONDEXPANSION: +README.md: values.yaml Chart.yaml $$(wildcard README.md.gotmpl) +ifdef HAS_HELM_DOCS + helm-docs +else + docker run --rm --volume "$(shell pwd):/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest +endif + +Chart.lock: Chart.yaml + helm dependency update . + @touch Chart.lock # Ensure the timestamp is updated + +values.schema.json: values.yaml $$(wildcard schema-mods/*) + ../../scripts/schema-gen.sh . + +.updatecli-%.yaml: Chart.yaml + ../../scripts/charts-to-updatecli.sh Chart.yaml + +.PHONY: clean +clean: + rm -f README.md values.schema.json $(UPDATECLI_FILES) + +.PHONY: build +build: README.md Chart.lock values.schema.json $(UPDATECLI_FILES) + +.PHONY: test +test: build + helm lint . + ct lint --lint-conf ../../.configs/lintconf.yaml --helm-dependency-extra-args=--skip-refresh --charts . +ifdef HAS_HELM_UNITTEST + helm unittest . +else + docker run --rm --volume $(shell pwd):/apps helmunittest/helm-unittest . 
+endif diff --git a/charts/feature-node-logs/README.md b/charts/feature-node-logs/README.md new file mode 100644 index 000000000..42fa96ee9 --- /dev/null +++ b/charts/feature-node-logs/README.md @@ -0,0 +1,56 @@ + + +# k8s-monitoring-feature-node-logs + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) +Kubernetes Observability feature for gathering Cluster Node logs. + +The Node Logs feature enables the collection of logs from Kubernetes Cluster Nodes. + +## Testing + +This chart contains unit tests to verify the generated configuration. A hidden value, `deployAsConfigMap`, will render +the generated configuration into a ConfigMap object. This ConfigMap is not used during regular operation, but it is +useful for showing the outcome of a given values file. + +The unit tests use this to create an object with the configuration that can be asserted against. To run the tests, use +`helm test`. + +Actual integration testing in a live environment should be done in the main [k8s-monitoring](../k8s-monitoring) chart. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| petewall | | | + + +## Source Code + +* + + + +## Values + +### General settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| fullnameOverride | string | `""` | Full name override | +| nameOverride | string | `""` | Name override | + +### Journal Logs + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| journal.extraDiscoveryRules | string | `""` | Rule blocks to be added used with the loki.source.journal component for journal logs. These relabeling rules are applied pre-scrape against the targets from service discovery. 
Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. | +| journal.extraLogProcessingBlocks | string | `""` | Stage blocks to be added to the loki.process component for journal logs. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | +| journal.formatAsJson | bool | `false` | Whether to forward the original journal entry as JSON. | +| journal.jobLabel | string | `"integrations/kubernetes/journal"` | The value for the job label for journal logs. | +| journal.maxAge | string | `"8h"` | The oldest relative time from process start that will be read from the journal. | +| journal.path | string | `"/var/log/journal"` | The path to the journal logs on the worker node. | +| journal.units | list | `[]` | The list of systemd units to keep scraped logs from. If empty, all units are scraped. | diff --git a/charts/feature-node-logs/README.md.gotmpl b/charts/feature-node-logs/README.md.gotmpl new file mode 100644 index 000000000..e06cd0e8c --- /dev/null +++ b/charts/feature-node-logs/README.md.gotmpl @@ -0,0 +1,32 @@ + + +{{ template "chart.header" . }} +{{ template "chart.deprecationWarning" . }} +{{ template "chart.badgesSection" . }} +{{ template "chart.description" . }} +{{ template "chart.homepageLine" . }} + +The Node Logs feature enables the collection of logs from Kubernetes Cluster Nodes. + +## Testing + +This chart contains unit tests to verify the generated configuration. A hidden value, `deployAsConfigMap`, will render +the generated configuration into a ConfigMap object. 
This ConfigMap is not used during regular operation, but it is +useful for showing the outcome of a given values file. + +The unit tests use this to create an object with the configuration that can be asserted against. To run the tests, use +`helm test`. + +Actual integration testing in a live environment should be done in the main [k8s-monitoring](../k8s-monitoring) chart. + +{{ template "chart.maintainersSection" . }} + + +{{ template "chart.sourcesSection" . }} + + +{{ template "chart.requirementsSection" . }} +{{ template "chart.valuesSection" . }} diff --git a/charts/feature-node-logs/templates/_collector_validation.tpl b/charts/feature-node-logs/templates/_collector_validation.tpl new file mode 100644 index 000000000..84bb24688 --- /dev/null +++ b/charts/feature-node-logs/templates/_collector_validation.tpl @@ -0,0 +1,13 @@ +{{/* Validates that the Alloy instance is appropriate for the given Node Logs settings */}} +{{/* Inputs: Values (Node Logs values), Collector (Alloy values), CollectorName (string) */}} +{{- define "feature.nodeLogs.collector.validate" -}} +{{- if not (eq .Collector.controller.type "daemonset") }} + {{- fail (printf "Node Logs feature requires Alloy to be a DaemonSet.\nPlease set:\n%s:\n controller:\n type: daemonset" .CollectorName) }} +{{- end -}} +{{- if and (hasPrefix "/var/log" .Values.journal.path) (not .Collector.alloy.mounts.varlog) }} + {{- fail (printf "Node Logs feature requires Alloy to mount /var/log.\nPlease set:\n%s:\n alloy:\n mounts:\n varlog: true" .CollectorName) }} +{{- end -}} +{{- if .Collector.alloy.clustering.enabled }} + {{- fail (printf "Node Logs feature requires Alloy to not be in clustering mode.\nPlease set:\n%s:\n alloy:\n clustering:\n enabled: false" .CollectorName) }} +{{- end -}} +{{- end -}} diff --git a/charts/feature-node-logs/templates/_helpers.tpl b/charts/feature-node-logs/templates/_helpers.tpl new file mode 100644 index 000000000..583785373 --- /dev/null +++ 
b/charts/feature-node-logs/templates/_helpers.tpl @@ -0,0 +1,17 @@ +{{/* +Create a default fully qualified name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "feature.nodeLogs.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride | lower }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/feature-node-logs/templates/_module.alloy.tpl b/charts/feature-node-logs/templates/_module.alloy.tpl new file mode 100644 index 000000000..fe780316c --- /dev/null +++ b/charts/feature-node-logs/templates/_module.alloy.tpl @@ -0,0 +1,49 @@ +{{- define "feature.nodeLogs.module" }} +declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + {{- if len .Values.journal.units }} + rule { + action = "keep" + source_labels = ["__journal__systemd_unit"] + regex = "{{ join "|" .Values.journal.units }}" + } + {{- end }} + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + {{- if .Values.journal.extraDiscoveryRules }} + {{ .Values.journal.extraDiscoveryRules | indent 2 }} + {{- end }} + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = {{ .Values.journal.path | quote }} + format_as_json = {{ .Values.journal.formatAsJson }} + max_age = {{ .Values.journal.maxAge | quote }} + relabel_rules = 
loki.relabel.journal.rules + labels = { + job = {{ .Values.journal.jobLabel | quote }}, + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + {{- if .Values.journal.extraLogProcessingBlocks }} + {{ tpl .Values.journal.extraLogProcessingBlocks . | indent 2 }} + {{ end }} + forward_to = argument.logs_destinations.value + } +} +{{- end -}} + +{{- define "feature.nodeLogs.alloyModules" }}{{- end }} diff --git a/charts/feature-node-logs/templates/_notes.tpl b/charts/feature-node-logs/templates/_notes.tpl new file mode 100644 index 000000000..ecdf628eb --- /dev/null +++ b/charts/feature-node-logs/templates/_notes.tpl @@ -0,0 +1,11 @@ +{{- define "feature.nodeLogs.notes.deployments" }}{{- end }} + +{{- define "feature.nodeLogs.notes.task" }} +Gather logs from Kubernetes Nodes +{{- end }} + +{{- define "feature.nodeLogs.notes.actions" }}{{- end }} + +{{- define "feature.nodeLogs.summary" -}} +version: {{ .Chart.Version }} +{{- end }} diff --git a/charts/feature-node-logs/templates/configmap.yaml b/charts/feature-node-logs/templates/configmap.yaml new file mode 100644 index 000000000..581522e51 --- /dev/null +++ b/charts/feature-node-logs/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.deployAsConfigMap }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "feature.nodeLogs.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + module.alloy: |- + {{- include "feature.nodeLogs.module" . 
| indent 4 }} +{{- end }} diff --git a/charts/feature-node-logs/tests/__snapshot__/.gitkeep b/charts/feature-node-logs/tests/__snapshot__/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/charts/feature-node-logs/tests/default_test.yaml b/charts/feature-node-logs/tests/default_test.yaml new file mode 100644 index 000000000..d43dead03 --- /dev/null +++ b/charts/feature-node-logs/tests/default_test.yaml @@ -0,0 +1,46 @@ +# yamllint disable rule:document-start rule:line-length rule:trailing-spaces +suite: Test default values +templates: + - configmap.yaml +tests: + - it: should render the default configuration + set: + deployAsConfigMap: true + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-node-logs/values.schema.json b/charts/feature-node-logs/values.schema.json new file mode 100644 index 000000000..770bd6d9a --- /dev/null +++ b/charts/feature-node-logs/values.schema.json @@ -0,0 +1,41 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "deployAsConfigMap": { + "type": "boolean" + }, + "fullnameOverride": { + "type": "string" + 
}, + "journal": { + "type": "object", + "properties": { + "extraDiscoveryRules": { + "type": "string" + }, + "extraLogProcessingBlocks": { + "type": "string" + }, + "formatAsJson": { + "type": "boolean" + }, + "jobLabel": { + "type": "string" + }, + "maxAge": { + "type": "string" + }, + "path": { + "type": "string" + }, + "units": { + "type": "array" + } + } + }, + "nameOverride": { + "type": "string" + } + } +} diff --git a/charts/feature-node-logs/values.yaml b/charts/feature-node-logs/values.yaml new file mode 100644 index 000000000..6229ee2bc --- /dev/null +++ b/charts/feature-node-logs/values.yaml @@ -0,0 +1,50 @@ +--- +# -- Name override +# @section -- General settings +nameOverride: "" + +# -- Full name override +# @section -- General settings +fullnameOverride: "" + +journal: + # -- The path to the journal logs on the worker node. + # @section -- Journal Logs + path: "/var/log/journal" + + # -- The oldest relative time from process start that will be read from the journal. + # @section -- Journal Logs + maxAge: "8h" + + # -- The value for the job label for journal logs. + # @section -- Journal Logs + jobLabel: "integrations/kubernetes/journal" + + # -- Whether to forward the original journal entry as JSON. + # @section -- Journal Logs + formatAsJson: false + + # -- The list of systemd units to keep scraped logs from. If empty, all units are scraped. + # @section -- Journal Logs + units: [] + # - kubelet.service + # - docker.service + # - containerd.service + + # -- Rule blocks to be added used with the loki.source.journal component for journal logs. + # These relabeling rules are applied pre-scrape against the targets from service discovery. + # Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. + # ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) + # **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. 
The final internal label name would + # be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. + # @section -- Journal Logs + extraDiscoveryRules: "" + + # -- Stage blocks to be added to the loki.process component for journal logs. + # ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) + # This value is templated so that you can refer to other values from this file. + # @section -- Journal Logs + extraLogProcessingBlocks: "" + +# @ignore +deployAsConfigMap: false diff --git a/charts/k8s-monitoring-v1/README.md b/charts/k8s-monitoring-v1/README.md index a7ff8bdb2..0f15b1532 100644 --- a/charts/k8s-monitoring-v1/README.md +++ b/charts/k8s-monitoring-v1/README.md @@ -449,6 +449,7 @@ The Prometheus and Loki services may be hosted on the same cluster, or remotely | Key | Type | Default | Description | |-----|------|---------|-------------| | logs.journal.enabled | bool | `false` | Scrape Kubernetes Worker Journal Logs event | +| logs.journal.extraRelabelingRules | string | `""` | Rule blocks to be added used with the loki.source.journal component for journal logs. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. | | logs.journal.extraStageBlocks | string | `""` | Stage blocks to be added to the loki.process component for journal logs. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. 
| | logs.journal.formatAsJson | bool | `false` | Whether to forward the original journal entry as JSON. | | logs.journal.jobLabel | string | `"integrations/kubernetes/journal"` | The value for the job label for journal logs | @@ -456,11 +457,19 @@ The Prometheus and Loki services may be hosted on the same cluster, or remotely | logs.journal.path | string | `"/var/log/journal"` | The path to the journal logs on the worker node | | logs.journal.units | list | `[]` | The list of systemd units to keep scraped logs from. If empty, all units are scraped. | +### Logs Scrape: PodLog Objects + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| logs.podLogsObjects.enabled | bool | `false` | Enable discovery of Grafana Alloy PodLogs objects. | +| logs.podLogsObjects.extraStageBlocks | string | `""` | Stage blocks to be added to the loki.process component for logs gathered via PodLogs objects. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | +| logs.podLogsObjects.namespaces | list | `[]` | Which namespaces to look for PodLogs objects. | +| logs.podLogsObjects.selector | string | `""` | Selector to filter which PodLogs objects to use. | + ### Logs Scrape: Pod Logs | Key | Type | Default | Description | |-----|------|---------|-------------| -| logs.journal.extraRelabelingRules | string | `""` | Rule blocks to be added used with the loki.source.journal component for journal logs. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. 
The final internal label name would be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. | | logs.pod_logs.annotation | string | `"k8s.grafana.com/logs.autogather"` | Pod annotation to use for controlling log discovery. | | logs.pod_logs.annotations | object | `{"job":"k8s.grafana.com/logs.job"}` | Loki labels to set with values copied from the Kubernetes Pod annotations. Format: `: `. | | logs.pod_logs.discovery | string | `"all"` | Controls the behavior of discovering pods for logs. | @@ -473,15 +482,6 @@ The Prometheus and Loki services may be hosted on the same cluster, or remotely | logs.pod_logs.namespaces | list | `[]` | Only capture logs from pods in these namespaces (`[]` means all namespaces). | | logs.pod_logs.structuredMetadata | object | `{}` | List of labels to turn into structured metadata. If your Loki instance does not support structured metadata, leave this empty. Format: `: `. | -### Logs Scrape: PodLog Objects - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| logs.podLogsObjects.enabled | bool | `false` | Enable discovery of Grafana Alloy PodLogs objects. | -| logs.podLogsObjects.extraStageBlocks | string | `""` | Stage blocks to be added to the loki.process component for logs gathered via PodLogs objects. ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | -| logs.podLogsObjects.namespaces | list | `[]` | Which namespaces to look for PodLogs objects. | -| logs.podLogsObjects.selector | string | `""` | Selector to filter which PodLogs objects to use. 
| - ### Logs Receiver | Key | Type | Default | Description | diff --git a/charts/k8s-monitoring-v1/values.yaml b/charts/k8s-monitoring-v1/values.yaml index 892a7c3d9..505cf7b54 100644 --- a/charts/k8s-monitoring-v1/values.yaml +++ b/charts/k8s-monitoring-v1/values.yaml @@ -1799,7 +1799,7 @@ logs: # ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery/discovery.relabel/#rule-block)) # **Note:** Many field names from journald start with an `_`, such as `_systemd_unit`. The final internal label name would # be `__journal__systemd_unit`, with two underscores between `__journal` and `systemd_unit`. - # @section -- Logs Scrape: Pod Logs + # @section -- Logs Scrape: Journal extraRelabelingRules: "" # Settings related to logs ingested via receivers diff --git a/charts/k8s-monitoring/.updatecli-alloy.yaml b/charts/k8s-monitoring/.updatecli-alloy.yaml index b94001f48..13d287388 100644 --- a/charts/k8s-monitoring/.updatecli-alloy.yaml +++ b/charts/k8s-monitoring/.updatecli-alloy.yaml @@ -16,7 +16,7 @@ conditions: kind: yaml spec: file: charts/k8s-monitoring/Chart.yaml - key: $.dependencies[9].name + key: $.dependencies[10].name value: alloy disablesourceinput: true targets: @@ -25,7 +25,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[9].version + key: $.dependencies[10].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -34,7 +34,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[10].version + key: $.dependencies[11].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -43,7 +43,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[11].version + key: $.dependencies[12].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy @@ -52,7 +52,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[12].version + key: $.dependencies[13].version name: charts/k8s-monitoring versionincrement: none 
sourceid: alloy @@ -61,7 +61,7 @@ targets: kind: helmchart spec: file: Chart.yaml - key: $.dependencies[13].version + key: $.dependencies[14].version name: charts/k8s-monitoring versionincrement: none sourceid: alloy diff --git a/charts/k8s-monitoring/Chart.lock b/charts/k8s-monitoring/Chart.lock index 611a87c84..92575faee 100644 --- a/charts/k8s-monitoring/Chart.lock +++ b/charts/k8s-monitoring/Chart.lock @@ -17,6 +17,9 @@ dependencies: - name: k8s-monitoring-feature-integrations repository: file://../feature-integrations version: 1.0.0 +- name: k8s-monitoring-feature-node-logs + repository: file://../feature-node-logs + version: 1.0.0 - name: k8s-monitoring-feature-pod-logs repository: file://../feature-pod-logs version: 1.0.0 @@ -41,5 +44,5 @@ dependencies: - name: alloy repository: https://grafana.github.io/helm-charts version: 0.10.0 -digest: sha256:c42e09be38582ced6f973de7fd7d2f5d96c1e926e187936e0ee1d7ae295c9e0f -generated: "2024-11-21T18:59:30.945426-06:00" +digest: sha256:5805f8a2eb47cc7ed65d2f8b94d07a3bf474143d179bf781be69c5f7ce1a4d00 +generated: "2024-11-26T14:43:42.607469-06:00" diff --git a/charts/k8s-monitoring/Chart.yaml b/charts/k8s-monitoring/Chart.yaml index abcfb27d7..088fd85b8 100644 --- a/charts/k8s-monitoring/Chart.yaml +++ b/charts/k8s-monitoring/Chart.yaml @@ -41,6 +41,11 @@ dependencies: name: k8s-monitoring-feature-integrations repository: file://../feature-integrations version: 1.0.0 + - alias: nodeLogs + name: k8s-monitoring-feature-node-logs + repository: file://../feature-node-logs + version: 1.0.0 + condition: nodeLogs.enabled - alias: podLogs name: k8s-monitoring-feature-pod-logs repository: file://../feature-pod-logs diff --git a/charts/k8s-monitoring/Makefile b/charts/k8s-monitoring/Makefile index da521f9b8..57138b016 100644 --- a/charts/k8s-monitoring/Makefile +++ b/charts/k8s-monitoring/Makefile @@ -75,12 +75,18 @@ else endif # Example targets -EXAMPLE_RELEASE_NAME=ko +EXAMPLE_RELEASE_NAME=k8smon EXAMPLE_VALUES_FILES = $(shell 
find docs/examples -name values.yaml) EXAMPLE_OUTPUT_FILES = $(EXAMPLE_VALUES_FILES:values.yaml=output.yaml) EXAMPLE_ALLOY_FILES = $(foreach file,$(EXAMPLE_VALUES_FILES),$(call alloy_configs, $(file))) EXAMPLE_README_FILES = $(EXAMPLE_VALUES_FILES:values.yaml=README.md) +INTEGRATION_TEST_VALUES_FILES = $(shell find tests/integration -name values.yaml) +INTEGRATION_TEST_OUTPUT_FILES = $(INTEGRATION_TEST_VALUES_FILES:values.yaml=.rendered/output.yaml) + +PLATFORM_TEST_VALUES_FILES = $(shell find tests/platform -name values.yaml) +PLATFORM_TEST_OUTPUT_FILES = $(PLATFORM_TEST_VALUES_FILES:values.yaml=.rendered/output.yaml) + alloy_configs = $(shell \ DIR="$(shell dirname $(1))/" yq e 'with_entries(select(.key == "alloy-*")) | select(.[].enabled == true) | to_entries | env(DIR) + .[].key + ".alloy"' $(1) \ ) @@ -94,6 +100,10 @@ Chart.lock: Chart.yaml %/output.yaml: %/values.yaml Chart.yaml Chart.lock values.yaml values.schema.json templates/destinations/_destination_types.tpl $(DESTINATION_VALUES_FILES) $(CHART_TEMPLATE_FILES) $(CHART_YAML_FILES) alloyModules/LICENSE $(ALLOY_MODULE_FILES) helm template $(EXAMPLE_RELEASE_NAME) . -f $< > $@ +%/.rendered/output.yaml: %/values.yaml Chart.yaml Chart.lock values.yaml values.schema.json templates/destinations/_destination_types.tpl $(DESTINATION_VALUES_FILES) $(CHART_TEMPLATE_FILES) $(CHART_YAML_FILES) alloyModules/LICENSE $(ALLOY_MODULE_FILES) + mkdir -p $(dir $@) + helm template $(EXAMPLE_RELEASE_NAME) . 
-f $< > $@ + %/alloy-logs.alloy: %/output.yaml yq 'select(.kind=="ConfigMap" and .metadata.name=="$(EXAMPLE_RELEASE_NAME)-alloy-logs") | .data["config.alloy"]' $< > $@ @@ -138,14 +148,19 @@ else docker run --platform linux/amd64 --rm --volume $(shell pwd):/src shellspec/shellspec -c /src/tests/example-checks -s /bin/sh endif +.PHONY: integration-test-checks +integration-test-checks: $(INTEGRATION_TEST_OUTPUT_FILES) +.PHONY: platform-test-checks +platform-test-checks: $(PLATFORM_TEST_OUTPUT_FILES) + .PHONY: clean clean: rm -f README.md values.schema.json $(UPDATECLI_FILES) templates/destinations/_destination_types.tpl schema-mods/destination-list.json $(DESTINATION_SCHEMA_FILES) $(DESTINATION_DOCS_FILES) - rm -f $(EXAMPLE_OUTPUT_FILES) $(EXAMPLE_ALLOY_FILES) $(EXAMPLE_README_FILES) + rm -f $(EXAMPLE_OUTPUT_FILES) $(EXAMPLE_ALLOY_FILES) $(EXAMPLE_README_FILES) $(INTEGRATION_TEST_OUTPUT_FILES) $(PLATFORM_TEST_OUTPUT_FILES) # Build targets .PHONY: build -build: README.md alloyModules/LICENSE values.schema.json templates/destinations/_destination_types.tpl $(DESTINATION_DOCS_FILES) $(UPDATECLI_FILES) examples +build: README.md alloyModules/LICENSE values.schema.json templates/destinations/_destination_types.tpl $(DESTINATION_DOCS_FILES) $(UPDATECLI_FILES) examples integration-test-checks platform-test-checks # Test targets .PHONY: test unittest lint-helm lint-configs diff --git a/charts/k8s-monitoring/README.md b/charts/k8s-monitoring/README.md index 133ee2d49..f44217d74 100644 --- a/charts/k8s-monitoring/README.md +++ b/charts/k8s-monitoring/README.md @@ -129,6 +129,7 @@ podLogs: | file://../feature-cluster-events | clusterEvents(k8s-monitoring-feature-cluster-events) | 1.0.0 | | file://../feature-cluster-metrics | clusterMetrics(k8s-monitoring-feature-cluster-metrics) | 1.0.0 | | file://../feature-integrations | integrations(k8s-monitoring-feature-integrations) | 1.0.0 | +| file://../feature-node-logs | nodeLogs(k8s-monitoring-feature-node-logs) | 1.0.0 | | 
file://../feature-pod-logs | podLogs(k8s-monitoring-feature-pod-logs) | 1.0.0 | | file://../feature-profiling | profiling(k8s-monitoring-feature-profiling) | 1.0.0 | | file://../feature-prometheus-operator-objects | prometheusOperatorObjects(k8s-monitoring-feature-prometheus-operator-objects) | 1.0.0 | @@ -336,6 +337,14 @@ podLogs: | integrations | object | No integrations enabled | Service Integrations enables gathering telemetry data for common services and applications deployed to Kubernetes. To see the valid options, please see the [Service Integrations documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-integrations). | | integrations.destinations | list | `[]` | The destinations where integration metrics will be sent. If empty, all metrics-capable destinations will be used. | +### Features - Node Logs + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| nodeLogs | object | Disabled | Node logs. Requires a destination that supports logs. To see the valid options, please see the [Node Logs feature documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-node-logs). | +| nodeLogs.destinations | list | `[]` | The destinations where logs will be sent. If empty, all logs-capable destinations will be used. | +| nodeLogs.enabled | bool | `false` | Enable gathering Kubernetes Cluster Node logs. 
| + ### Features - Pod Logs | Key | Type | Default | Description | diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz index 972d7c2f0..03d25bcd4 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-annotation-autodiscovery-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz index 87f1cfa4e..ef75be977 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-application-observability-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz index d790fb338..156d6cdf4 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-auto-instrumentation-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz index 893bc3fc7..3878cd603 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-events-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz index 3ea62378f..975f1a587 100644 Binary files 
a/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-cluster-metrics-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz index 0c6874c16..00e52b9cd 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-integrations-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz new file mode 100644 index 000000000..ca01dbc82 Binary files /dev/null and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-node-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz index e1b33611d..416e2d9dc 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-pod-logs-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz index b3e9c92c0..de63515da 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz and b/charts/k8s-monitoring/charts/k8s-monitoring-feature-profiling-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz index 91fcdcb73..0ee8a5364 100644 Binary files a/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz and 
b/charts/k8s-monitoring/charts/k8s-monitoring-feature-prometheus-operator-objects-1.0.0.tgz differ diff --git a/charts/k8s-monitoring/docs/Features.md b/charts/k8s-monitoring/docs/Features.md index 468923bda..5322f092b 100644 --- a/charts/k8s-monitoring/docs/Features.md +++ b/charts/k8s-monitoring/docs/Features.md @@ -7,6 +7,7 @@ These are the current features supported in this Helm chart: - [Application Observability](#application-observability) - [Annotation Autodiscovery](#annotation-autodiscovery) - [Prometheus Operator Objects](#prometheus-operator-objects) +- [Node Logs](#node-logs) - [Pod Logs](#pod-logs) - [Service Integrations](#service-integrations) - [Profiling](#profiling) @@ -42,6 +43,12 @@ Collects metrics from Pods and Services that use a specific annotation. Collects metrics from Prometheus Operator objects, like PodMonitors and ServiceMonitors. +## Node Logs + +[Documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-node-logs) + +Collects logs from Kubernetes Cluster Nodes. 
+ ## Pod Logs [Documentation](https://github.com/grafana/k8s-monitoring-helm/tree/main/charts/feature-pod-logs) diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy index d20dcd98d..8bf3ae289 100644 --- a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-metrics.alloy @@ -49,7 +49,7 @@ prometheus.remote_write "prometheus" { } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } @@ -111,7 +111,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy index d5bb49ed5..ef0ddb011 100644 --- a/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/alloy-receiver.alloy @@ -49,7 +49,7 @@ prometheus.remote_write "prometheus" { } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } diff --git a/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml index d80dc8521..8f0e65ccd 100644 --- a/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml +++ b/charts/k8s-monitoring/docs/examples/auth/bearer-token/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -51,7 +51,7 @@ metadata: apiVersion: v1 kind: Secret metadata: - name: "prometheus-ko-k8s-monitoring" + name: "prometheus-k8smon-k8s-monitoring" namespace: "default" type: Opaque data: @@ -61,7 +61,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -116,7 +116,7 @@ data: } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } @@ -178,7 +178,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -225,7 +225,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -378,7 +378,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -433,7 +433,7 @@ data: } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + 
name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } @@ -636,11 +636,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -736,11 +736,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -836,11 +836,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -936,11 +936,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -949,21 +949,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics 
labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -972,21 +972,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -995,21 +995,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1019,7 +1019,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1031,11 +1031,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1047,7 +1047,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -1064,11 +1064,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1078,7 +1078,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1090,11 +1090,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1104,7 +1104,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1120,11 +1120,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1134,16 +1134,16 @@ spec: selector: matchLabels: 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1224,7 +1224,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -1236,11 +1236,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1250,16 +1250,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1334,17 +1334,17 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver --- # Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm 
@@ -1353,11 +1353,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -1365,9 +1365,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1379,7 +1379,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -1442,4 +1442,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy index ce301572e..3814b0fd1 100644 --- a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/alloy-metrics.alloy @@ -110,7 +110,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml index 0431d688a..c9536ceb1 100644 --- a/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/auth/embedded-secrets/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -51,7 +51,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -167,7 +167,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -214,7 +214,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -367,7 +367,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -613,11 +613,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -713,11 +713,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -813,11 +813,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -913,11 +913,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -926,21 +926,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -949,21 +949,21 @@ metadata: 
roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -972,21 +972,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -996,7 +996,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1008,11 +1008,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1024,7 +1024,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: 
ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -1041,11 +1041,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1055,7 +1055,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1067,11 +1067,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1081,7 +1081,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1097,11 +1097,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1111,16 +1111,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1198,7 +1198,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -1210,11 +1210,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1224,16 +1224,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1308,17 +1308,17 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver --- # Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1327,11 +1327,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -1339,9 +1339,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1353,7 +1353,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -1416,4 +1416,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy index dd5f57b8e..987798a5a 100644 --- a/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/alloy-metrics.alloy @@ -118,7 +118,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml index 17e47e6fa..66b199fc5 100644 --- a/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml +++ b/charts/k8s-monitoring/docs/examples/auth/external-secrets/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko 
+ app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -51,7 +51,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -175,7 +175,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -222,7 +222,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -387,7 +387,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -665,11 +665,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -765,11 +765,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: 
k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -865,11 +865,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -965,11 +965,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -978,21 +978,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1001,21 +1001,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml apiVersion: 
rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1024,21 +1024,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1048,7 +1048,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1060,11 +1060,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1076,7 +1076,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -1093,11 +1093,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: 
k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1107,7 +1107,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1119,11 +1119,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1133,7 +1133,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1149,11 +1149,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1163,16 +1163,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1250,7 +1250,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: 
varlog hostPath: path: /var/log @@ -1262,11 +1262,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1276,16 +1276,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1360,17 +1360,17 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver --- # Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1379,11 +1379,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -1391,9 +1391,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + 
serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1405,7 +1405,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -1468,4 +1468,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy index a5ada80e6..3c5d36614 100644 --- a/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/alloy-metrics.alloy @@ -53,7 +53,7 @@ prometheus.remote_write "prometheus" { } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } @@ -64,7 +64,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -171,7 +171,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -182,7 +182,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -196,7 +196,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -207,7 +207,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ 
"app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -237,7 +237,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -289,7 +289,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml b/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml index 70043e711..e27f3ded4 100644 --- a/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml +++ b/charts/k8s-monitoring/docs/examples/auth/sigv4/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -26,17 +26,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -44,16 +44,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: 
k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -61,15 +61,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/templates/destination_secret.yaml apiVersion: v1 kind: Secret metadata: - name: "prometheus-ko-k8s-monitoring" + name: "prometheus-k8smon-k8s-monitoring" namespace: "default" type: Opaque data: @@ -80,7 +80,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -88,9 +88,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -103,7 +103,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -162,7 +162,7 @@ data: } remote.kubernetes.secret "prometheus" { - name = "prometheus-ko-k8s-monitoring" + name = "prometheus-k8smon-k8s-monitoring" namespace = "default" } @@ -173,7 +173,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -280,7 +280,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" 
{ - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -291,7 +291,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -305,7 +305,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -316,7 +316,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -346,7 +346,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -398,7 +398,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -443,7 +443,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1640,7 +1640,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -1900,11 +1900,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2006,10 +2006,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: 
k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2156,11 +2156,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2169,10 +2169,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2185,28 +2185,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2218,7 +2218,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2235,11 +2235,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2249,7 +2249,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2261,7 +2261,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2269,9 +2269,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2283,13 +2283,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2297,9 +2297,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: 
prometheus.io/scrape: "true" spec: @@ -2311,13 +2311,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2325,9 +2325,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2340,13 +2340,13 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2354,14 +2354,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2378,9 +2378,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: 
automountServiceAccountToken: false securityContext: @@ -2388,7 +2388,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -2473,7 +2473,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2481,14 +2481,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2504,9 +2504,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2518,7 +2518,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2568,13 +2568,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: 
k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2582,14 +2582,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2602,13 +2602,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -2664,11 +2664,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2677,11 +2677,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2689,9 +2689,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy 
labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2703,7 +2703,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -2766,4 +2766,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/autoscaling/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/autoscaling/alloy-metrics.alloy index d12822c60..579b52685 100644 --- a/charts/k8s-monitoring/docs/examples/autoscaling/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/autoscaling/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + 
"release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -279,7 +279,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/autoscaling/output.yaml b/charts/k8s-monitoring/docs/examples/autoscaling/output.yaml index 581f40187..53ee00c2d 100644 --- a/charts/k8s-monitoring/docs/examples/autoscaling/output.yaml +++ b/charts/k8s-monitoring/docs/examples/autoscaling/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -26,17 +26,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -44,16 +44,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: 
ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -61,15 +61,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -77,9 +77,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -92,7 +92,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -152,7 +152,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -259,7 +259,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -270,7 +270,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -284,7 +284,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = 
"ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -295,7 +295,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -325,7 +325,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -377,7 +377,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -422,7 +422,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1619,7 +1619,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -1879,11 +1879,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1985,10 +1985,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2135,11 +2135,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2148,10 +2148,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2164,28 +2164,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2197,7 +2197,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2214,11 +2214,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 
app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2228,7 +2228,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2240,7 +2240,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2248,9 +2248,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2262,13 +2262,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2276,9 +2276,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2290,13 +2290,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + 
name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2304,9 +2304,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2319,13 +2319,13 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2333,14 +2333,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2357,9 +2357,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2367,7 +2367,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -2452,7 +2452,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: 
- name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2460,14 +2460,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2483,9 +2483,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2497,7 +2497,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2547,13 +2547,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2561,14 +2561,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2581,13 +2581,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -2643,11 +2643,11 @@ spec: apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2657,7 +2657,7 @@ spec: scaleTargetRef: apiVersion: apps/v1 kind: statefulset - name: ko-alloy-metrics + name: k8smon-alloy-metrics minReplicas: 2 maxReplicas: 10 behavior: @@ -2680,11 +2680,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2692,11 +2692,11 @@ metadata: spec: podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: 
k8smon template: metadata: annotations: @@ -2704,9 +2704,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2718,7 +2718,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -2785,4 +2785,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy index 6a62bf7f5..8b35099a7 100644 --- a/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/collector-storage/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare 
"cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -279,7 +279,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml b/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml index 43dfb65f6..a55ca4182 100644 --- a/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml +++ b/charts/k8s-monitoring/docs/examples/collector-storage/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -42,17 +42,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # 
Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -60,16 +60,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -77,15 +77,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -93,9 +93,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -108,7 +108,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -168,7 +168,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" 
+ name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -275,7 +275,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -286,7 +286,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -300,7 +300,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -311,7 +311,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -341,7 +341,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -393,7 +393,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -439,7 +439,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -591,7 +591,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1788,7 +1788,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2048,11 +2048,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2148,11 +2148,11 @@ rules: 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2254,10 +2254,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2404,11 +2404,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2417,21 +2417,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2440,10 +2440,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: 
ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2456,28 +2456,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2487,7 +2487,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2499,11 +2499,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2515,7 +2515,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can 
be double-collected with the non-headless Service if it's also @@ -2532,11 +2532,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2546,7 +2546,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2558,7 +2558,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2566,9 +2566,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2580,13 +2580,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2594,9 +2594,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2608,13 +2608,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2622,9 +2622,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2637,17 +2637,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2657,16 +2657,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2747,7 +2747,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -2763,7 +2763,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: 
k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2771,14 +2771,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2795,9 +2795,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2805,7 +2805,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -2890,7 +2890,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2898,14 +2898,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2921,9 +2921,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2935,7 +2935,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2985,13 +2985,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2999,14 +2999,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3019,13 +3019,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics 
securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3081,11 +3081,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3094,11 +3094,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3106,9 +3106,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3120,7 +3120,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3186,7 +3186,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics volumeClaimTemplates: - metadata: diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy index f444ee55f..0b22a9ca1 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-metrics.alloy @@ -63,7 +63,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = 
"ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -170,7 +170,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -181,7 +181,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -195,7 +195,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -206,7 +206,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -236,7 +236,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy index c22ceeb55..cc59fabd7 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/extra-rules/alloy-singleton.alloy @@ -135,7 +135,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml index d3ab33919..f9f5e7bf4 100644 --- a/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml +++ b/charts/k8s-monitoring/docs/examples/extra-rules/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: 
ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -58,17 +58,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -76,16 +76,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # 
Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -93,15 +93,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -109,9 +109,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -124,7 +124,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -193,7 +193,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -300,7 +300,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -311,7 +311,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -325,7 +325,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = 
"default" } @@ -336,7 +336,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -366,7 +366,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -409,7 +409,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -550,7 +550,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -597,7 +597,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -777,7 +777,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1974,7 +1974,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2234,11 +2234,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2334,11 +2334,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2434,11 +2434,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 
kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2540,10 +2540,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2690,11 +2690,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2703,21 +2703,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2726,21 +2726,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics 
namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2749,10 +2749,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2765,28 +2765,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2796,7 +2796,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster 
ports: - name: http-metrics @@ -2808,11 +2808,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2824,7 +2824,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2841,11 +2841,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2855,7 +2855,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2867,11 +2867,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2881,7 +2881,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2893,7 +2893,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics 
namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2901,9 +2901,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2915,13 +2915,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2929,9 +2929,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2943,13 +2943,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2957,9 +2957,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2972,17 +2972,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2992,16 +2992,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3082,7 +3082,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -3094,7 +3094,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3102,14 +3102,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -3126,9 +3126,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon 
app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3136,7 +3136,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -3221,7 +3221,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3229,14 +3229,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -3252,9 +3252,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3266,7 +3266,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -3316,17 +3316,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter 
--- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3337,7 +3337,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3345,9 +3345,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3422,13 +3422,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3436,14 +3436,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3456,13 +3456,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: 
kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3518,11 +3518,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3531,11 +3531,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3543,9 +3543,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3557,7 +3557,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3623,4 +3623,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/alloy-metrics.alloy 
b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/alloy-metrics.alloy index 902b5ea83..fdb4c143f 100644 --- a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/alloy-metrics.alloy @@ -261,7 +261,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/output.yaml index 98e890c56..1b90a6bd1 100644 --- a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -286,7 +286,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -331,11 +331,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" 
app.kubernetes.io/managed-by: Helm @@ -431,11 +431,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -444,21 +444,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -470,7 +470,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -487,11 +487,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -501,7 +501,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -513,11 +513,11 @@ spec: 
apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -526,11 +526,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -538,9 +538,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -552,7 +552,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -615,4 +615,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/alloy-metrics.alloy index 889a7de61..9339707cb 100644 --- a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/alloy-metrics.alloy @@ -261,7 +261,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label 
= "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/output.yaml b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/output.yaml index 553701d68..b11110e7d 100644 --- a/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/annotation-autodiscovery/prom-annotations/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -286,7 +286,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -331,11 +331,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -431,11 +431,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm 
@@ -444,21 +444,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -470,7 +470,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -487,11 +487,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -501,7 +501,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -513,11 +513,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -526,11 +526,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - 
serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -538,9 +538,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -552,7 +552,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -615,4 +615,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/alloy-receiver.alloy b/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/alloy-receiver.alloy index 8c317f870..faf55306a 100644 --- a/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/alloy-receiver.alloy +++ b/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/alloy-receiver.alloy @@ -183,7 +183,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/output.yaml index 87670de20..433918389 100644 --- a/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/features/applicaiton-observability/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -208,7 +208,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -253,11 +253,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -353,11 +353,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -366,21 +366,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: 
alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -390,7 +390,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -406,11 +406,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -420,16 +420,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -504,4 +504,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy index 7ea85070d..abb63bd1a 100644 --- a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/alloy-metrics.alloy @@ -126,7 +126,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { 
target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml index 20f98caee..641f305ec 100644 --- a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics-and-traces/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -51,7 +51,7 @@ automountServiceAccountToken: true apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -183,7 +183,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + 
replacement = "k8smon" } rule { target_label = "job" @@ -229,7 +229,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -409,12 +409,12 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -455,17 +455,17 @@ data: - application_service_graph - application_span otel_traces_export: - endpoint: http://ko-alloy-receiver.default.svc.cluster.local:4317 + endpoint: http://k8smon-alloy-receiver.default.svc.cluster.local:4317 --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -561,11 +561,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -661,11 +661,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-beyla + name: k8smon-beyla labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -682,11 
+682,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -695,21 +695,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -718,43 +718,43 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role-binding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-beyla + name: k8smon-beyla labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla app.kubernetes.io/component: rbac subjects: - kind: ServiceAccount - name: ko-beyla + name: k8smon-beyla namespace: default roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-beyla + name: k8smon-beyla --- 
# Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -766,7 +766,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -783,11 +783,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -797,7 +797,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -809,11 +809,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -823,7 +823,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -839,11 +839,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver 
labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -853,16 +853,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -937,18 +937,18 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver --- # Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/daemon-set.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -957,7 +957,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: type: RollingUpdate template: @@ -968,13 +968,13 @@ spec: labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla app.kubernetes.io/component: workload spec: - serviceAccountName: ko-beyla + serviceAccountName: k8smon-beyla hostPID: true containers: - name: beyla @@ -997,17 +997,17 @@ spec: volumes: - name: beyla-config configMap: - name: ko-beyla + name: 
k8smon-beyla --- # Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1016,11 +1016,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -1028,9 +1028,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1042,7 +1042,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -1105,4 +1105,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy index c83784fe2..529dccce0 100644 --- a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/alloy-metrics.alloy @@ -114,7 
+114,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml index 0211477a7..117fcc009 100644 --- a/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/auto-instrumentation/beyla-metrics/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -35,7 +35,7 @@ automountServiceAccountToken: true apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -155,7 +155,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -200,12 +200,12 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -250,11 +250,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -350,11 +350,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-beyla + name: k8smon-beyla labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -371,11 +371,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -384,43 +384,43 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/autoInstrumentation/charts/beyla/templates/cluster-role-binding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-beyla + name: k8smon-beyla labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla app.kubernetes.io/component: rbac subjects: - kind: ServiceAccount - name: ko-beyla + 
name: k8smon-beyla namespace: default roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-beyla + name: k8smon-beyla --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -432,7 +432,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -449,11 +449,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -463,7 +463,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -475,12 +475,12 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-beyla + name: k8smon-beyla namespace: default labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla @@ -489,7 +489,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: type: RollingUpdate 
template: @@ -500,13 +500,13 @@ spec: labels: helm.sh/chart: beyla-1.4.13 app.kubernetes.io/name: beyla - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.8" app.kubernetes.io/managed-by: Helm app.kubernetes.io/part-of: beyla app.kubernetes.io/component: workload spec: - serviceAccountName: ko-beyla + serviceAccountName: k8smon-beyla hostPID: true containers: - name: beyla @@ -529,17 +529,17 @@ spec: volumes: - name: beyla-config configMap: - name: ko-beyla + name: k8smon-beyla --- # Source: k8s-monitoring/charts/alloy-metrics/templates/controllers/statefulset.yaml apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -548,11 +548,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -560,9 +560,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -574,7 +574,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -637,4 +637,4 @@ spec: volumes: - name: config configMap: - name: 
ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml index fb2876cb9..1dfd2c353 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-events/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -63,11 +63,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -163,11 +163,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -176,21 +176,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default 
--- # Source: k8s-monitoring/charts/alloy-singleton/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -200,7 +200,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -212,11 +212,11 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -227,7 +227,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -235,9 +235,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -309,4 +309,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy index d52006007..e88b6c77f 100644 --- 
a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -276,7 +276,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -287,7 +287,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -301,7 +301,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -312,7 +312,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -342,7 +342,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -387,7 +387,7 @@ declare "etcd_integration" { } remote.kubernetes.configmap "etcd" { - name = "ko-alloy-module-databases" + name = "k8smon-alloy-module-databases" namespace = "default" } diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy index 8873cda71..6f2009c76 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy +++ 
b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/alloy-singleton.alloy @@ -95,7 +95,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml index 4464bef84..e54a4cb93 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/control-plane-monitoring/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -58,17 +58,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: 
kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -76,16 +76,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -93,15 +93,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -109,9 +109,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -124,7 +124,7 @@ 
data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -184,7 +184,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -406,7 +406,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -417,7 +417,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -431,7 +431,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -442,7 +442,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -472,7 +472,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -517,7 +517,7 @@ data: } remote.kubernetes.configmap "etcd" { - name = "ko-alloy-module-databases" + name = "k8smon-alloy-module-databases" namespace = "default" } @@ -548,7 +548,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -649,7 +649,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -697,7 +697,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -849,7 +849,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-databases + name: k8smon-alloy-module-databases data: 
kv_etcd_metrics.alloy: | /* @@ -1101,7 +1101,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -2298,7 +2298,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2558,11 +2558,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2658,11 +2658,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2758,11 +2758,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2864,10 +2864,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -3014,11 +3014,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: 
ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3027,21 +3027,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3050,21 +3050,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3073,10 +3073,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: 
k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -3089,28 +3089,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3120,7 +3120,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -3132,11 +3132,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3148,7 +3148,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -3165,11 
+3165,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3179,7 +3179,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -3191,11 +3191,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3205,7 +3205,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -3217,7 +3217,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3225,9 +3225,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -3239,13 +3239,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: 
k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3253,9 +3253,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -3267,13 +3267,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3281,9 +3281,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -3296,17 +3296,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3316,16 +3316,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3403,7 +3403,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -3415,7 +3415,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3423,14 +3423,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -3447,9 +3447,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3457,7 +3457,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -3542,7 +3542,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3550,14 +3550,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: 
windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -3573,9 +3573,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3587,7 +3587,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -3637,17 +3637,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3658,7 +3658,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3666,9 +3666,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: 
app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3740,13 +3740,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3754,14 +3754,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3774,13 +3774,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3836,11 +3836,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm 
@@ -3849,11 +3849,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3861,9 +3861,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3875,7 +3875,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3938,4 +3938,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy index 18f8efb74..a0d657838 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes 
"targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -361,7 +361,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml index 89bf23c1d..8e85c4001 100644 --- a/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/cluster-metrics/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-kepler + name: k8smon-kepler namespace: default labels: helm.sh/chart: kepler-0.5.11 @@ -39,17 +39,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -57,21 +57,21 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/opencost/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-opencost + name: k8smon-opencost namespace: default labels: helm.sh/chart: opencost-1.42.3 app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.113.0" app.kubernetes.io/part-of: opencost app.kubernetes.io/managed-by: Helm @@ -81,7 +81,7 @@ automountServiceAccountToken: true apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -89,15 +89,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: 
helm.sh/chart: windows-exporter-0.7.1 @@ -105,9 +105,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -120,7 +120,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -180,7 +180,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -287,7 +287,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -298,7 +298,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -312,7 +312,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -323,7 +323,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -353,7 +353,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -487,7 +487,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -532,7 +532,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1729,7 +1729,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system 
+ name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -1989,11 +1989,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2089,7 +2089,7 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-kepler-clusterrole + name: k8smon-kepler-clusterrole rules: - apiGroups: [""] resources: @@ -2112,10 +2112,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2264,11 +2264,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-opencost + name: k8smon-opencost labels: helm.sh/chart: opencost-1.42.3 app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.113.0" app.kubernetes.io/part-of: opencost app.kubernetes.io/managed-by: Helm @@ -2349,11 +2349,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2362,24 +2362,24 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - 
kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kepler/templates/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-kepler-clusterrole-binding + name: k8smon-kepler-clusterrole-binding roleRef: kind: ClusterRole - name: ko-kepler-clusterrole + name: k8smon-kepler-clusterrole apiGroup: "rbac.authorization.k8s.io" subjects: - kind: ServiceAccount - name: ko-kepler + name: k8smon-kepler namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2392,49 +2392,49 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/opencost/templates/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-opencost + name: k8smon-opencost labels: helm.sh/chart: opencost-1.42.3 app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.113.0" app.kubernetes.io/part-of: opencost app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-opencost + name: k8smon-opencost subjects: - kind: ServiceAccount - name: ko-opencost + name: k8smon-opencost namespace: default --- # Source: 
k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2446,7 +2446,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2463,11 +2463,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2477,7 +2477,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2489,7 +2489,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kepler + name: k8smon-kepler namespace: default labels: helm.sh/chart: kepler-0.5.11 @@ -2512,7 +2512,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2520,9 +2520,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2534,13 
+2534,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2548,9 +2548,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2562,25 +2562,25 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/opencost/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-opencost + name: k8smon-opencost namespace: default labels: helm.sh/chart: opencost-1.42.3 app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.113.0" app.kubernetes.io/part-of: opencost app.kubernetes.io/managed-by: Helm spec: selector: app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon type: "ClusterIP" ports: - name: http @@ -2591,7 +2591,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2599,9 +2599,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: 
prometheus.io/scrape: "true" spec: @@ -2614,13 +2614,13 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kepler/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-kepler + name: k8smon-kepler namespace: default labels: helm.sh/chart: kepler-0.5.11 @@ -2642,7 +2642,7 @@ spec: app.kubernetes.io/component: exporter spec: hostNetwork: true - serviceAccountName: ko-kepler + serviceAccountName: k8smon-kepler containers: - name: kepler-exporter image: "quay.io/sustainable_computing_io/kepler:release-0.7.12" @@ -2739,7 +2739,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2747,14 +2747,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2771,9 +2771,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2781,7 +2781,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -2866,7 +2866,7 @@ spec: apiVersion: apps/v1 kind: 
DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2874,14 +2874,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2897,9 +2897,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2911,7 +2911,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2961,13 +2961,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2975,14 +2975,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: 
kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2995,13 +2995,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3057,12 +3057,12 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: ko-opencost + name: k8smon-opencost namespace: default labels: helm.sh/chart: opencost-1.42.3 app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.113.0" app.kubernetes.io/part-of: opencost app.kubernetes.io/managed-by: Helm @@ -3071,7 +3071,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon strategy: rollingUpdate: maxSurge: 1 @@ -3081,9 +3081,9 @@ spec: metadata: labels: app.kubernetes.io/name: opencost - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-opencost + serviceAccountName: k8smon-opencost tolerations: - effect: NoSchedule key: kubernetes.io/arch @@ -3092,7 +3092,7 @@ spec: nodeSelector: kubernetes.io/os: linux containers: - - name: ko-opencost + - name: k8smon-opencost image: ghcr.io/opencost/opencost:1.113.0@sha256:b313d6d320058bbd3841a948fb636182f49b46df2368d91e2ae046ed03c0f83c imagePullPolicy: IfNotPresent args: @@ -3168,11 +3168,11 @@ 
spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3181,11 +3181,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3193,9 +3193,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3207,7 +3207,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3270,4 +3270,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/alloy/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/integrations/alloy/alloy-metrics.alloy index b18274070..bb3d64887 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/alloy/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/integrations/alloy/alloy-metrics.alloy @@ -340,7 +340,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" 
} rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/alloy/output.yaml b/charts/k8s-monitoring/docs/examples/features/integrations/alloy/output.yaml index 3587ab969..d1141e6b6 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/alloy/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/integrations/alloy/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -365,7 +365,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -410,11 +410,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -510,11 +510,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -523,21 +523,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: 
ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -549,7 +549,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -566,11 +566,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -580,7 +580,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -592,11 +592,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -605,11 +605,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -617,9 +617,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -631,7 +631,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -694,4 +694,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/alloy-metrics.alloy index 7a8284560..efc3188ce 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/alloy-metrics.alloy @@ -53,7 +53,7 @@ declare "cert_manager_integration" { } remote.kubernetes.configmap "cert_manager" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -94,7 +94,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/output.yaml b/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/output.yaml index 7cd849f3e..6e7494595 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/features/integrations/cert-manager/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -78,7 +78,7 @@ data: } remote.kubernetes.configmap "cert_manager" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -119,7 +119,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -164,7 +164,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: cert-manager_metrics.alloy: | /* @@ -395,11 +395,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -495,11 +495,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -508,21 +508,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + 
name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -534,7 +534,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -551,11 +551,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -565,7 +565,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -577,11 +577,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -590,11 +590,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: 
alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -602,9 +602,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -616,7 +616,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -679,4 +679,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/etcd/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/integrations/etcd/alloy-metrics.alloy index 21b6600c7..698737086 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/etcd/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/integrations/etcd/alloy-metrics.alloy @@ -53,7 +53,7 @@ declare "etcd_integration" { } remote.kubernetes.configmap "etcd" { - name = "ko-alloy-module-databases" + name = "k8smon-alloy-module-databases" namespace = "default" } @@ -93,7 +93,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/etcd/output.yaml b/charts/k8s-monitoring/docs/examples/features/integrations/etcd/output.yaml index d5015bc95..20bab5f42 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/etcd/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/features/integrations/etcd/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -78,7 +78,7 @@ data: } remote.kubernetes.configmap "etcd" { - name = "ko-alloy-module-databases" + name = "k8smon-alloy-module-databases" namespace = "default" } @@ -118,7 +118,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -163,7 +163,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-databases + name: k8smon-alloy-module-databases data: kv_etcd_metrics.alloy: | /* @@ -415,11 +415,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -515,11 +515,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -528,21 +528,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics 
subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -554,7 +554,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -571,11 +571,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -585,7 +585,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -597,11 +597,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -610,11 +610,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -622,9 +622,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -636,7 +636,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -699,4 +699,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/mysql/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/integrations/mysql/alloy-metrics.alloy index 711ef4a60..89522aa4b 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/mysql/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/integrations/mysql/alloy-metrics.alloy @@ -73,7 +73,7 @@ declare "mysql_integration" { remote.kubernetes.secret "prod_db" { - name = "prod-db-ko-integrations" + name = "prod-db-k8smon-integrations" namespace = "default" } @@ -120,7 +120,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/integrations/mysql/output.yaml b/charts/k8s-monitoring/docs/examples/features/integrations/mysql/output.yaml index fe5ad4681..785710fb8 100644 --- a/charts/k8s-monitoring/docs/examples/features/integrations/mysql/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/features/integrations/mysql/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,7 +35,7 @@ metadata: apiVersion: v1 kind: Secret metadata: - name: "prod-db-ko-integrations" + name: "prod-db-k8smon-integrations" namespace: "default" stringData: username: "db-admin" @@ -45,7 +45,7 @@ stringData: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -124,7 +124,7 @@ data: remote.kubernetes.secret "prod_db" { - name = "prod-db-ko-integrations" + name = "prod-db-k8smon-integrations" namespace = "default" } @@ -171,7 +171,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -217,7 +217,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -429,11 +429,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ 
-529,11 +529,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -629,11 +629,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -642,21 +642,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -665,21 +665,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: 
"v1.5.0" app.kubernetes.io/managed-by: Helm @@ -689,7 +689,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -701,11 +701,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -717,7 +717,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -734,11 +734,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -748,7 +748,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -760,11 +760,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -774,16 +774,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: 
k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -861,7 +861,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -873,11 +873,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -886,11 +886,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -898,9 +898,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -912,7 +912,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -975,4 +975,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git 
a/charts/k8s-monitoring/docs/examples/features/node-logs/default/README.md b/charts/k8s-monitoring/docs/examples/features/node-logs/default/README.md new file mode 100644 index 000000000..c6f7ecea5 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/README.md @@ -0,0 +1,24 @@ + +# Example: features/node-logs/default/values.yaml + +## Values + +```yaml +--- +cluster: + name: node-logs-cluster + +destinations: + - name: loki + type: loki + url: http://loki.loki.svc:3100/api/push + +nodeLogs: + enabled: true + +alloy-logs: + enabled: true +``` diff --git a/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy b/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy new file mode 100644 index 000000000..19bc87fd9 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/alloy-logs.alloy @@ -0,0 +1,56 @@ +// Destination: loki (loki) +otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] +} + +loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/api/push" + tls_config { + insecure_skip_verify = false + } + } + external_labels = { + cluster = "node-logs-cluster", + "k8s_cluster_name" = "node-logs-cluster", + } +} + +// Feature: Node Logs +declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal "worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = 
[loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + forward_to = argument.logs_destinations.value + } +} +node_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] +} diff --git a/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml new file mode 100644 index 000000000..cbeeac845 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/output.yaml @@ -0,0 +1,343 @@ +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8smon-alloy-logs + namespace: default + labels: + helm.sh/chart: alloy-logs-0.10.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + + app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/templates/alloy-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8smon-alloy-logs + namespace: default +data: + config.alloy: |- + // Destination: loki (loki) + otelcol.exporter.loki "loki" { + forward_to = [loki.write.loki.receiver] + } + + loki.write "loki" { + endpoint { + url = "http://loki.loki.svc:3100/api/push" + tls_config { + insecure_skip_verify = false + } + } + external_labels = { + cluster = "node-logs-cluster", + "k8s_cluster_name" = "node-logs-cluster", + } + } + + // Feature: Node Logs + declare "node_logs" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.relabel "journal" { + rule { + action = "replace" + source_labels = ["__journal__systemd_unit"] + replacement = "$1" + target_label = "unit" + } + + forward_to = [] // No forward_to is used in this component, the defined rules are used in the loki.source.journal component + } + + loki.source.journal 
"worker" { + path = "/var/log/journal" + format_as_json = false + max_age = "8h" + relabel_rules = loki.relabel.journal.rules + labels = { + job = "integrations/kubernetes/journal", + instance = env("HOSTNAME"), + } + forward_to = [loki.process.journal_logs.receiver] + } + + loki.process "journal_logs" { + forward_to = argument.logs_destinations.value + } + } + node_logs "feature" { + logs_destinations = [ + loki.write.loki.receiver, + ] + } +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: k8smon-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.10.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + + app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +rules: + # Rules which allow discovery.kubernetes to function. + - apiGroups: + - "" + - "discovery.k8s.io" + - "networking.k8s.io" + resources: + - endpoints + - endpointslices + - ingresses + - nodes + - nodes/proxy + - nodes/metrics + - pods + - services + verbs: + - get + - list + - watch + # Rules which allow loki.source.kubernetes and loki.source.podlogs to work. + - apiGroups: + - "" + resources: + - pods + - pods/log + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - "monitoring.grafana.com" + resources: + - podlogs + verbs: + - get + - list + - watch + # Rules which allow mimir.rules.kubernetes to work. + - apiGroups: ["monitoring.coreos.com"] + resources: + - prometheusrules + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + verbs: + - get + # Rules for prometheus.kubernetes.* + - apiGroups: ["monitoring.coreos.com"] + resources: + - podmonitors + - servicemonitors + - probes + verbs: + - get + - list + - watch + # Rules which allow eventhandler to work. 
+ - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + # needed for remote.kubernetes.* + - apiGroups: [""] + resources: + - "configmaps" + - "secrets" + verbs: + - get + - list + - watch + # needed for otelcol.processor.k8sattributes + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: k8smon-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.10.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + + app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8smon-alloy-logs +subjects: + - kind: ServiceAccount + name: k8smon-alloy-logs + namespace: default +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: k8smon-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.10.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + + app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: networking +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + internalTrafficPolicy: Cluster + ports: + - name: http-metrics + port: 12345 + targetPort: 12345 + protocol: "TCP" +--- +# Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: k8smon-alloy-logs + labels: + helm.sh/chart: alloy-logs-0.10.0 + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + + 
app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy +spec: + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: alloy + labels: + app.kubernetes.io/name: alloy-logs + app.kubernetes.io/instance: k8smon + spec: + serviceAccountName: k8smon-alloy-logs + containers: + - name: alloy + image: docker.io/grafana/alloy:v1.5.0 + imagePullPolicy: IfNotPresent + args: + - run + - /etc/alloy/config.alloy + - --storage.path=/tmp/alloy + - --server.http.listen-addr=0.0.0.0:12345 + - --server.http.ui-path-prefix=/ + - --stability.level=generally-available + env: + - name: ALLOY_DEPLOY_MODE + value: "helm" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + ports: + - containerPort: 12345 + name: http-metrics + readinessProbe: + httpGet: + path: /-/ready + port: 12345 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - name: config + mountPath: /etc/alloy + - name: varlog + mountPath: /var/log + readOnly: true + - name: dockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config-reloader + image: ghcr.io/jimmidyson/configmap-reload:v0.12.0 + args: + - --volume-dir=/etc/alloy + - --webhook-url=http://localhost:12345/-/reload + volumeMounts: + - name: config + mountPath: /etc/alloy + resources: + requests: + cpu: 1m + memory: 5Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + volumes: + - name: config + configMap: + name: k8smon-alloy-logs + - name: varlog + hostPath: + 
path: /var/log + - name: dockercontainers + hostPath: + path: /var/lib/docker/containers diff --git a/charts/k8s-monitoring/docs/examples/features/node-logs/default/values.yaml b/charts/k8s-monitoring/docs/examples/features/node-logs/default/values.yaml new file mode 100644 index 000000000..bf1980748 --- /dev/null +++ b/charts/k8s-monitoring/docs/examples/features/node-logs/default/values.yaml @@ -0,0 +1,14 @@ +--- +cluster: + name: node-logs-cluster + +destinations: + - name: loki + type: loki + url: http://loki.loki.svc:3100/api/push + +nodeLogs: + enabled: true + +alloy-logs: + enabled: true diff --git a/charts/k8s-monitoring/docs/examples/features/pod-logs/default/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/features/pod-logs/default/alloy-singleton.alloy deleted file mode 100644 index 7b27b9431..000000000 --- a/charts/k8s-monitoring/docs/examples/features/pod-logs/default/alloy-singleton.alloy +++ /dev/null @@ -1,32 +0,0 @@ -// Destination: loki (loki) -otelcol.exporter.loki "loki" { - forward_to = [loki.write.loki.receiver] -} - -loki.write "loki" { - endpoint { - url = "http://loki.loki.svc:3100/api/push" - } - external_labels = { - cluster = "pod-logs-cluster", - "k8s_cluster_name" = "pod-logs-cluster", - } -} - -// Feature: Cluster Events -declare "cluster_events" { - argument "logs_destinations" { - comment = "Must be a list of log destinations where collected logs should be forwarded to" - } - - loki.source.kubernetes_events "cluster_events" { - job_name = "integrations/kubernetes/eventhandler" - log_format = "logfmt" - forward_to = argument.logs_destinations.value - } -} -cluster_events "feature" { - logs_destinations = [ - loki.write.loki.receiver, - ] -} diff --git a/charts/k8s-monitoring/docs/examples/features/pod-logs/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/pod-logs/default/output.yaml index 28f5b3282..90e61e218 100644 --- a/charts/k8s-monitoring/docs/examples/features/pod-logs/default/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/features/pod-logs/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -171,11 +171,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -271,11 +271,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -284,21 +284,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -308,7 +308,7 @@ spec: type: ClusterIP selector: 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -320,11 +320,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -334,16 +334,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -421,7 +421,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log diff --git a/charts/k8s-monitoring/docs/examples/features/profiling/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/profiling/default/output.yaml index a727481d4..a8f307d3d 100644 --- a/charts/k8s-monitoring/docs/examples/features/profiling/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/profiling/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles namespace: default labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles 
namespace: default data: config.alloy: |- @@ -875,11 +875,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -975,11 +975,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -988,21 +988,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-profiles + name: k8smon-alloy-profiles subjects: - kind: ServiceAccount - name: ko-alloy-profiles + name: k8smon-alloy-profiles namespace: default --- # Source: k8s-monitoring/charts/alloy-profiles/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1012,7 +1012,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -1024,11 +1024,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: 
"v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1038,16 +1038,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-profiles + serviceAccountName: k8smon-alloy-profiles containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -1105,4 +1105,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-profiles + name: k8smon-alloy-profiles diff --git a/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/alloy-metrics.alloy index 2f41af909..f49b05cb7 100644 --- a/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/alloy-metrics.alloy @@ -105,7 +105,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/output.yaml b/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/output.yaml index 0fa6eebbd..85ceebcc2 100644 --- a/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/output.yaml +++ b/charts/k8s-monitoring/docs/examples/features/prometheus-operator-objects/default/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -130,7 +130,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -175,11 +175,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -275,11 +275,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -288,21 +288,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -314,7 +314,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -331,11 +331,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -345,7 +345,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -357,11 +357,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -370,11 +370,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -382,9 +382,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -396,7 +396,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - 
--cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -459,4 +459,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy index 4e69cf819..8d5c39baf 100644 --- a/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/metrics-tuning/alloy-metrics.alloy @@ -265,7 +265,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -372,7 +372,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -383,7 +383,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -397,7 +397,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -408,7 +408,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -438,7 +438,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -490,7 +490,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git 
a/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml b/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml index 3d57f927c..1c0bb78c3 100644 --- a/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml +++ b/charts/k8s-monitoring/docs/examples/metrics-tuning/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -26,17 +26,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -44,16 +44,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -61,15 +61,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: 
windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -77,9 +77,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -92,7 +92,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -363,7 +363,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -470,7 +470,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -481,7 +481,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -495,7 +495,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -506,7 +506,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -536,7 +536,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ 
-588,7 +588,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -634,7 +634,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1831,7 +1831,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2091,11 +2091,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2197,10 +2197,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2347,11 +2347,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2360,10 +2360,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: 
k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2376,28 +2376,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2409,7 +2409,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2426,11 +2426,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2440,7 +2440,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: 
http-metrics @@ -2452,7 +2452,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2460,9 +2460,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2474,13 +2474,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2488,9 +2488,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2502,13 +2502,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2516,9 +2516,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon 
annotations: prometheus.io/scrape: "true" spec: @@ -2531,13 +2531,13 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2545,14 +2545,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2569,9 +2569,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2579,7 +2579,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -2664,7 +2664,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2672,14 +2672,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" 
- release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2695,9 +2695,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2709,7 +2709,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2759,13 +2759,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2773,14 +2773,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2793,13 +2793,13 @@ spec: app.kubernetes.io/component: metrics 
app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -2855,11 +2855,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2868,11 +2868,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2880,9 +2880,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2894,7 +2894,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -2957,4 +2957,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy 
b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy index 0ad14f928..76a880c7e 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy index 49bd2a914..d244a43f8 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/alloy-singleton.alloy @@ -95,7 +95,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" 
} rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml index 6a836ca99..f8b1c1ed0 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/azure-aks/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -58,17 +58,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: 
ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -76,16 +76,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -93,15 +93,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -109,9 +109,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -124,7 +124,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -184,7 +184,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -291,7 +291,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - 
name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -302,7 +302,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -316,7 +316,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -327,7 +327,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -357,7 +357,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -400,7 +400,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -501,7 +501,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -548,7 +548,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -700,7 +700,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1897,7 +1897,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2157,11 +2157,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2257,11 
+2257,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2357,11 +2357,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2463,10 +2463,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2613,11 +2613,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2626,21 +2626,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: 
alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2649,21 +2649,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2672,10 +2672,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2688,28 +2688,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: 
ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2719,7 +2719,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2731,11 +2731,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2747,7 +2747,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2764,11 +2764,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2778,7 +2778,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2790,11 +2790,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2804,7 +2804,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2816,7 +2816,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2824,9 +2824,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2838,13 +2838,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2852,9 +2852,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2866,13 +2866,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: 
windows-exporter-0.7.1 @@ -2880,9 +2880,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2895,17 +2895,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2915,7 +2915,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2923,9 +2923,9 @@ spec: kubernetes.azure.com/set-kube-service-host-fqdn: "true" labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3003,7 +3003,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -3015,7 +3015,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3023,14 +3023,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -3047,9 +3047,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3057,7 +3057,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -3142,7 +3142,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3150,14 +3150,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -3173,9 +3173,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3187,7 +3187,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 
command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -3237,17 +3237,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3258,7 +3258,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3267,9 +3267,9 @@ spec: kubernetes.azure.com/set-kube-service-host-fqdn: "true" labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3341,13 +3341,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3355,14 +3355,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: 
kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3375,16 +3375,16 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: kubernetes.azure.com/set-kube-service-host-fqdn: "true" spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3440,11 +3440,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3453,11 +3453,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3466,9 +3466,9 @@ spec: kubernetes.azure.com/set-kube-service-host-fqdn: "true" labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3480,7 +3480,7 @@ 
spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3543,4 +3543,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy index 20751b48a..c1d171b75 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -192,7 +192,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy index 353d31e48..ddd144811 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/alloy-singleton.alloy @@ -95,7 +95,7 @@ 
discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml index 5c99fafca..3ac1b23de 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/eks-fargate/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -58,17 +58,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default 
--- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -76,15 +76,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -92,9 +92,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -107,7 +107,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -167,7 +167,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -274,7 +274,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -285,7 +285,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -305,7 +305,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = 
"app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -348,7 +348,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -449,7 +449,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -496,7 +496,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -628,7 +628,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1825,11 +1825,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1925,11 +1925,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2025,11 +2025,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2131,10 +2131,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2281,11 +2281,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2294,21 +2294,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2317,21 +2317,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2340,10 +2340,10 @@ 
metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2356,28 +2356,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs-cluster + name: k8smon-alloy-logs-cluster labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2389,7 +2389,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2406,11 +2406,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" 
app.kubernetes.io/managed-by: Helm @@ -2420,7 +2420,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2432,11 +2432,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2448,7 +2448,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2465,11 +2465,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2479,7 +2479,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2491,11 +2491,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2505,7 +2505,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2517,7 +2517,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2525,9 +2525,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2539,13 +2539,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2553,9 +2553,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2568,13 +2568,13 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2582,14 +2582,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2605,9 +2605,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2619,7 +2619,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2669,17 +2669,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2690,16 +2690,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2711,7 +2711,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-logs-cluster + - --cluster.join-addresses=k8smon-alloy-logs-cluster - --stability.level=generally-available env: - name: ALLOY_DEPLOY_MODE @@ -2776,7 +2776,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: dockercontainers hostPath: path: /var/lib/docker/containers @@ -2785,11 +2785,11 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2800,7 +2800,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2808,9 +2808,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2882,13 +2882,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2896,14 +2896,14 @@ 
metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2916,13 +2916,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -2978,11 +2978,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2991,11 +2991,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3003,9 +3003,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3017,7 +3017,7 @@ spec: - 
--server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3080,4 +3080,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy index b22293759..1ce9914aa 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -192,7 +192,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy index 2af3b8bb1..e0067d9a0 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/alloy-singleton.alloy @@ -95,7 +95,7 @@ 
discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml index 0a2b5f418..028d1372c 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/gke-autopilot/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -58,17 +58,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: 
default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -76,15 +76,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -92,9 +92,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -107,7 +107,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -167,7 +167,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -274,7 +274,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -285,7 +285,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -305,7 +305,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = 
"app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -348,7 +348,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -449,7 +449,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -496,7 +496,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -648,7 +648,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1845,11 +1845,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -1945,11 +1945,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2045,11 +2045,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2151,10 +2151,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2301,11 +2301,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2314,21 +2314,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2337,21 +2337,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2360,10 +2360,10 @@ 
metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2376,28 +2376,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2407,7 +2407,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2419,11 +2419,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2435,7 +2435,7 @@ spec: publishNotReadyAddresses: true selector: 
app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2452,11 +2452,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2466,7 +2466,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2478,11 +2478,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2492,7 +2492,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2504,7 +2504,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2512,9 +2512,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -2526,13 +2526,13 @@ spec: selector: app.kubernetes.io/name: 
kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2540,9 +2540,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2555,17 +2555,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2575,16 +2575,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2662,7 +2662,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -2674,7 +2674,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: 
ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2682,14 +2682,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2705,9 +2705,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2719,7 +2719,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -2769,17 +2769,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2790,7 +2790,7 @@ spec: 
selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2798,9 +2798,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -2872,13 +2872,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2886,14 +2886,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -2906,13 +2906,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -2968,11 +2968,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + 
name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2981,11 +2981,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -2993,9 +2993,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3007,7 +3007,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3070,4 +3070,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy index 37ee13be6..a535837cf 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap 
"kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy index 2f8e1114f..63a619bf7 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/alloy-singleton.alloy @@ -95,7 +95,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml index 52774d6a3..907a742e6 100644 --- a/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml +++ b/charts/k8s-monitoring/docs/examples/platforms/openshift/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -51,7 +51,7 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-kepler + name: k8smon-kepler namespace: default labels: helm.sh/chart: kepler-0.5.11 @@ -71,17 +71,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -89,16 +89,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - 
release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -106,15 +106,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -122,9 +122,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -137,7 +137,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -197,7 +197,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -304,7 +304,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -315,7 +315,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -329,7 +329,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = 
"ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -340,7 +340,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -370,7 +370,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -454,7 +454,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -555,7 +555,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -602,7 +602,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -754,7 +754,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1951,7 +1951,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2211,11 +2211,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2311,11 +2311,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: 
Helm @@ -2411,11 +2411,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2511,7 +2511,7 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-kepler-clusterrole + name: k8smon-kepler-clusterrole rules: - apiGroups: [""] resources: @@ -2534,10 +2534,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2684,7 +2684,7 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-kepler-scc + name: k8smon-kepler-scc rules: - verbs: - use @@ -2693,13 +2693,13 @@ rules: resources: - securitycontextconstraints resourceNames: - - ko-kepler + - k8smon-kepler --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-logs-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs-scc + name: k8smon-alloy-logs-scc rules: - verbs: - use @@ -2708,13 +2708,13 @@ rules: resources: - securitycontextconstraints resourceNames: - - ko-alloy-logs + - k8smon-alloy-logs --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-metrics-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics-scc + name: k8smon-alloy-metrics-scc rules: - verbs: - use @@ -2723,13 +2723,13 @@ rules: resources: - securitycontextconstraints resourceNames: - - ko-alloy-metrics + - 
k8smon-alloy-metrics --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-singleton-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton-scc + name: k8smon-alloy-singleton-scc rules: - verbs: - use @@ -2738,17 +2738,17 @@ rules: resources: - securitycontextconstraints resourceNames: - - ko-alloy-singleton + - k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/alloy-logs/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2757,21 +2757,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2780,21 +2780,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 
app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2803,24 +2803,24 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kepler/templates/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-kepler-clusterrole-binding + name: k8smon-kepler-clusterrole-binding roleRef: kind: ClusterRole - name: ko-kepler-clusterrole + name: k8smon-kepler-clusterrole apiGroup: "rbac.authorization.k8s.io" subjects: - kind: ServiceAccount - name: ko-kepler + name: k8smon-kepler namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2833,84 +2833,84 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/templates/platform_specific/openshift/kepler-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-kepler-scc + name: k8smon-kepler-scc roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kepler-scc + name: k8smon-kepler-scc subjects: - kind: 
ServiceAccount - name: ko-kepler + name: k8smon-kepler namespace: default --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-logs-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs-scc + name: k8smon-alloy-logs-scc roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs-scc + name: k8smon-alloy-logs-scc subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-metrics-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics-scc + name: k8smon-alloy-metrics-scc roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics-scc + name: k8smon-alloy-metrics-scc subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/templates/platform_specific/openshift/alloy-singleton-scc.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton-scc + name: k8smon-alloy-singleton-scc roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton-scc + name: k8smon-alloy-singleton-scc subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2920,7 +2920,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: 
Cluster ports: - name: http-metrics @@ -2932,11 +2932,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2948,7 +2948,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2965,11 +2965,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2979,7 +2979,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2991,11 +2991,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3005,7 +3005,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -3017,7 +3017,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kepler + name: k8smon-kepler namespace: default 
labels: helm.sh/chart: kepler-0.5.11 @@ -3040,7 +3040,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3048,9 +3048,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -3062,13 +3062,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3076,9 +3076,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -3090,13 +3090,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3104,9 +3104,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - 
release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -3119,17 +3119,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3139,16 +3139,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3226,7 +3226,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -3238,7 +3238,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-kepler + name: k8smon-kepler namespace: default labels: helm.sh/chart: kepler-0.5.11 @@ -3260,7 +3260,7 @@ spec: app.kubernetes.io/component: exporter spec: hostNetwork: true - serviceAccountName: ko-kepler + serviceAccountName: k8smon-kepler containers: - name: kepler-exporter image: "quay.io/sustainable_computing_io/kepler:release-0.7.12" @@ -3357,7 +3357,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -3365,14 +3365,14 @@ metadata: app.kubernetes.io/component: metrics 
app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -3389,9 +3389,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -3399,7 +3399,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -3484,7 +3484,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -3492,14 +3492,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -3515,9 +3515,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: 
false securityContext: @@ -3529,7 +3529,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -3579,17 +3579,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3600,7 +3600,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3608,9 +3608,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -3682,13 +3682,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ 
-3696,14 +3696,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3716,13 +3716,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3778,11 +3778,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3791,11 +3791,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3803,9 +3803,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ 
-3817,7 +3817,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3880,13 +3880,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics --- # Source: k8s-monitoring/charts/clusterMetrics/templates/platform_specific/openshift/kepler-scc.yaml apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: - name: ko-kepler + name: k8smon-kepler allowHostDirVolumePlugin: true allowHostIPC: false allowHostNetwork: true @@ -3914,7 +3914,7 @@ seccompProfiles: supplementalGroups: type: RunAsAny users: - - system:serviceaccount:default:ko-kepler + - system:serviceaccount:default:k8smon-kepler volumes: - configMap - hostPath @@ -3924,7 +3924,7 @@ volumes: apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs allowHostDirVolumePlugin: true allowHostIPC: false allowHostNetwork: false @@ -3977,7 +3977,7 @@ volumes: apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false @@ -4029,7 +4029,7 @@ volumes: apiVersion: security.openshift.io/v1 kind: SecurityContextConstraints metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false diff --git a/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy index ec41b580a..cbc31f6b1 100644 --- a/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy +++ 
b/charts/k8s-monitoring/docs/examples/private-image-registries/alloy-metrics.alloy @@ -54,7 +54,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -161,7 +161,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -172,7 +172,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -186,7 +186,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -197,7 +197,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -227,7 +227,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -279,7 +279,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml b/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml index 0d2257a4a..bf8e273e6 100644 --- a/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml +++ b/charts/k8s-monitoring/docs/examples/private-image-registries/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 
app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -42,10 +42,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default imagePullSecrets: - name: my-registry-creds @@ -54,7 +54,7 @@ imagePullSecrets: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -62,9 +62,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false imagePullSecrets: - name: my-registry-creds @@ -73,7 +73,7 @@ imagePullSecrets: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -81,9 +81,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - 
release: ko + release: k8smon imagePullSecrets: - name: my-registry-creds --- @@ -91,7 +91,7 @@ imagePullSecrets: apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -99,9 +99,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -114,7 +114,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -174,7 +174,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -281,7 +281,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -292,7 +292,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -306,7 +306,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -317,7 +317,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -347,7 +347,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -399,7 +399,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -445,7 +445,7 @@ data: 
apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -597,7 +597,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -1794,7 +1794,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -2054,11 +2054,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2154,11 +2154,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2260,10 +2260,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -2410,11 +2410,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ 
-2423,21 +2423,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2446,10 +2446,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -2462,28 +2462,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" 
app.kubernetes.io/managed-by: Helm @@ -2493,7 +2493,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2505,11 +2505,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2521,7 +2521,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -2538,11 +2538,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2552,7 +2552,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -2564,7 +2564,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -2572,9 +2572,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: 
k8smon annotations: spec: type: "ClusterIP" @@ -2586,13 +2586,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2600,9 +2600,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2614,13 +2614,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2628,9 +2628,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -2643,17 +2643,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -2663,16 +2663,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs imagePullSecrets: - name: my-registry-creds containers: @@ -2752,7 +2752,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -2764,7 +2764,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -2772,14 +2772,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -2796,9 +2796,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2806,7 +2806,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: 
my.registry.com/prometheus/node-exporter:v1.8.2 @@ -2893,7 +2893,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -2901,14 +2901,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -2924,9 +2924,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -2938,7 +2938,7 @@ spec: image: my.registry.com/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: my.registry.com/prometheus-community/windows-exporter:0.29.2 @@ -2990,13 +2990,13 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -3004,14 +3004,14 @@ 
metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -3024,13 +3024,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -3088,11 +3088,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3101,11 +3101,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -3113,9 +3113,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics imagePullSecrets: - name: my-registry-creds containers: @@ -3129,7 +3129,7 @@ spec: - 
--server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -3192,4 +3192,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy index 001f00c82..afc3adc17 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/proxies/alloy-metrics.alloy @@ -55,7 +55,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -162,7 +162,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -173,7 +173,7 @@ declare "cluster_metrics" { kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -187,7 +187,7 @@ declare "cluster_metrics" { } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -198,7 +198,7 @@ declare "cluster_metrics" { node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -228,7 +228,7 @@ declare "cluster_metrics" { } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } diff --git a/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy 
b/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy index eed9a72b9..5e48e69a9 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy +++ b/charts/k8s-monitoring/docs/examples/proxies/alloy-singleton.alloy @@ -96,7 +96,7 @@ discovery.relabel "kubernetes_monitoring_telemetry" { rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" diff --git a/charts/k8s-monitoring/docs/examples/proxies/output.yaml b/charts/k8s-monitoring/docs/examples/proxies/output.yaml index be47a7d95..011c84daf 100644 --- a/charts/k8s-monitoring/docs/examples/proxies/output.yaml +++ b/charts/k8s-monitoring/docs/examples/proxies/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,12 +19,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -35,12 +35,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles namespace: default labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -51,12 +51,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default labels: 
helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -67,12 +67,12 @@ metadata: apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -90,17 +90,17 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -108,16 +108,16 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon automountServiceAccountToken: false --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -125,15 +125,15 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - 
app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/config.yaml apiVersion: v1 kind: ConfigMap metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -141,9 +141,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon data: config.yml: | collectors: @@ -156,7 +156,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -217,7 +217,7 @@ data: } remote.kubernetes.configmap "kubernetes" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -324,7 +324,7 @@ data: } remote.kubernetes.configmap "kube_state_metrics" { - name = "ko-alloy-module-kubernetes" + name = "k8smon-alloy-module-kubernetes" namespace = "default" } @@ -335,7 +335,7 @@ data: kube_state_metrics.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=kube-state-metrics", - "release=ko", + "release=k8smon", ] } @@ -349,7 +349,7 @@ data: } remote.kubernetes.configmap "node_exporter" { - name = "ko-alloy-module-system" + name = "k8smon-alloy-module-system" namespace = "default" } @@ -360,7 +360,7 @@ data: node_exporter.kubernetes "targets" { label_selectors = [ "app.kubernetes.io/name=node-exporter", - "release=ko", + "release=k8smon", ] } @@ -390,7 +390,7 @@ data: } selectors { role = "pod" - label = "app.kubernetes.io/name=windows-exporter,release=ko" + label = "app.kubernetes.io/name=windows-exporter,release=k8smon" } } @@ -433,7 +433,7 @@ data: apiVersion: v1 kind: ConfigMap 
metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default data: config.alloy: |- @@ -535,7 +535,7 @@ data: rule { target_label = "instance" action = "replace" - replacement = "ko" + replacement = "k8smon" } rule { target_label = "job" @@ -584,7 +584,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs namespace: default data: config.alloy: |- @@ -736,7 +736,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default data: config.alloy: |- @@ -972,7 +972,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles namespace: default data: config.alloy: |- @@ -1828,7 +1828,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-kubernetes + name: k8smon-alloy-module-kubernetes data: core_metrics.alloy: | /* @@ -3025,7 +3025,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-module-system + name: k8smon-alloy-module-system data: node-exporter_metrics.alloy: | /* @@ -3285,11 +3285,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3385,11 +3385,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3485,11 +3485,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles 
labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3585,11 +3585,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3685,11 +3685,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3791,10 +3791,10 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics rules: - apiGroups: ["certificates.k8s.io"] @@ -3941,11 +3941,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3954,21 +3954,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-logs + name: k8smon-alloy-logs subjects: - kind: ServiceAccount - name: ko-alloy-logs + name: k8smon-alloy-logs 
namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -3977,21 +3977,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-profiles/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4000,21 +4000,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-profiles + name: k8smon-alloy-profiles subjects: - kind: ServiceAccount - name: ko-alloy-profiles + name: k8smon-alloy-profiles namespace: default --- # Source: k8s-monitoring/charts/alloy-receiver/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4023,21 +4023,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-receiver + name: k8smon-alloy-receiver subjects: - kind: 
ServiceAccount - name: ko-alloy-receiver + name: k8smon-alloy-receiver namespace: default --- # Source: k8s-monitoring/charts/alloy-singleton/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4046,10 +4046,10 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-singleton + name: k8smon-alloy-singleton subjects: - kind: ServiceAccount - name: ko-alloy-singleton + name: k8smon-alloy-singleton namespace: default --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -4062,28 +4062,28 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko - name: ko-kube-state-metrics + release: k8smon + name: k8smon-kube-state-metrics roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics subjects: - kind: ServiceAccount - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-logs/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4093,7 +4093,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko 
+ app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -4105,11 +4105,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4121,7 +4121,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -4138,11 +4138,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4152,7 +4152,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -4164,11 +4164,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4178,7 +4178,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -4190,11 +4190,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: 
ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4204,7 +4204,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -4220,11 +4220,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4234,7 +4234,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -4246,7 +4246,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -4254,9 +4254,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon annotations: spec: type: "ClusterIP" @@ -4268,13 +4268,13 @@ spec: selector: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: 
node-exporter-4.42.0 @@ -4282,9 +4282,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -4296,13 +4296,13 @@ spec: name: metrics selector: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/clusterMetrics/charts/windows-exporter/templates/service.yaml apiVersion: v1 kind: Service metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -4310,9 +4310,9 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon annotations: prometheus.io/scrape: "true" spec: @@ -4325,17 +4325,17 @@ spec: name: metrics selector: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon --- # Source: k8s-monitoring/charts/alloy-logs/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-logs + name: k8smon-alloy-logs labels: helm.sh/chart: alloy-logs-0.10.0 app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4345,16 +4345,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-logs - app.kubernetes.io/instance: ko + 
app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-logs + serviceAccountName: k8smon-alloy-logs containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -4432,7 +4432,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-logs + name: k8smon-alloy-logs - name: varlog hostPath: path: /var/log @@ -4444,11 +4444,11 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-profiles + name: k8smon-alloy-profiles labels: helm.sh/chart: alloy-profiles-0.10.0 app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4458,16 +4458,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: app.kubernetes.io/name: alloy-profiles - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-profiles + serviceAccountName: k8smon-alloy-profiles containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -4525,17 +4525,17 @@ spec: volumes: - name: config configMap: - name: ko-alloy-profiles + name: k8smon-alloy-profiles --- # Source: k8s-monitoring/charts/alloy-receiver/templates/controllers/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-alloy-receiver + name: k8smon-alloy-receiver labels: helm.sh/chart: alloy-receiver-0.10.0 app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4545,16 +4545,16 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: kubectl.kubernetes.io/default-container: alloy labels: 
app.kubernetes.io/name: alloy-receiver - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-receiver + serviceAccountName: k8smon-alloy-receiver containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -4635,13 +4635,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-receiver + name: k8smon-alloy-receiver --- # Source: k8s-monitoring/charts/clusterMetrics/charts/node-exporter/templates/daemonset.yaml apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-node-exporter + name: k8smon-node-exporter namespace: default labels: helm.sh/chart: node-exporter-4.42.0 @@ -4649,14 +4649,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon revisionHistoryLimit: 10 updateStrategy: rollingUpdate: @@ -4673,9 +4673,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: node-exporter app.kubernetes.io/name: node-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "1.8.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -4683,7 +4683,7 @@ spec: runAsGroup: 65534 runAsNonRoot: true runAsUser: 65534 - serviceAccountName: ko-node-exporter + serviceAccountName: k8smon-node-exporter containers: - name: node-exporter image: quay.io/prometheus/node-exporter:v1.8.2 @@ -4768,7 +4768,7 @@ spec: apiVersion: apps/v1 kind: DaemonSet metadata: - name: ko-windows-exporter + name: k8smon-windows-exporter namespace: default labels: helm.sh/chart: windows-exporter-0.7.1 @@ -4776,14 +4776,14 @@ metadata: app.kubernetes.io/component: metrics 
app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon updateStrategy: rollingUpdate: maxUnavailable: 1 @@ -4799,9 +4799,9 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: windows-exporter app.kubernetes.io/name: windows-exporter - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "0.29.2" - release: ko + release: k8smon spec: automountServiceAccountToken: false securityContext: @@ -4813,7 +4813,7 @@ spec: image: ghcr.io/prometheus-community/windows-exporter:0.29.2 command: [ "powershell" ] args: [ "New-NetFirewallRule", "-DisplayName", "'windows-exporter'", "-Direction", "inbound", "-Profile", "Any", "-Action", "Allow", "-LocalPort", "9182", "-Protocol", "TCP" ] - serviceAccountName: ko-windows-exporter + serviceAccountName: k8smon-windows-exporter containers: - name: windows-exporter image: ghcr.io/prometheus-community/windows-exporter:0.29.2 @@ -4863,17 +4863,17 @@ spec: volumes: - name: config configMap: - name: ko-windows-exporter + name: k8smon-windows-exporter --- # Source: k8s-monitoring/charts/alloy-singleton/templates/controllers/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-alloy-singleton + name: k8smon-alloy-singleton labels: helm.sh/chart: alloy-singleton-0.10.0 app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -4884,7 +4884,7 @@ spec: selector: matchLabels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -4892,9 +4892,9 @@ 
spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-singleton - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-singleton + serviceAccountName: k8smon-alloy-singleton containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -4966,13 +4966,13 @@ spec: volumes: - name: config configMap: - name: ko-alloy-singleton + name: k8smon-alloy-singleton --- # Source: k8s-monitoring/charts/clusterMetrics/charts/kube-state-metrics/templates/deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: - name: ko-kube-state-metrics + name: k8smon-kube-state-metrics namespace: default labels: helm.sh/chart: kube-state-metrics-5.27.0 @@ -4980,14 +4980,14 @@ metadata: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: selector: matchLabels: app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon replicas: 1 strategy: type: Recreate @@ -5000,13 +5000,13 @@ spec: app.kubernetes.io/component: metrics app.kubernetes.io/part-of: kube-state-metrics app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "2.14.0" - release: ko + release: k8smon spec: automountServiceAccountToken: true hostNetwork: false - serviceAccountName: ko-kube-state-metrics + serviceAccountName: k8smon-kube-state-metrics securityContext: fsGroup: 65534 runAsGroup: 65534 @@ -5062,11 +5062,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon 
app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -5075,11 +5075,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: metadata: annotations: @@ -5087,9 +5087,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -5101,7 +5101,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=generally-available env: @@ -5164,4 +5164,4 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics diff --git a/charts/k8s-monitoring/docs/examples/remote-config/alloy-metrics.alloy b/charts/k8s-monitoring/docs/examples/remote-config/alloy-metrics.alloy index ee1c2ff65..c4b0b5105 100644 --- a/charts/k8s-monitoring/docs/examples/remote-config/alloy-metrics.alloy +++ b/charts/k8s-monitoring/docs/examples/remote-config/alloy-metrics.alloy @@ -2,7 +2,7 @@ remote.kubernetes.secret "alloy_metrics_remote_cfg" { - name = "alloy-metrics-remote-cfg-ko-k8s-monitoring" + name = "alloy-metrics-remote-cfg-k8smon-k8s-monitoring" namespace = "default" } diff --git a/charts/k8s-monitoring/docs/examples/remote-config/output.yaml b/charts/k8s-monitoring/docs/examples/remote-config/output.yaml index 3fc5210e3..6c33f5adf 100644 --- a/charts/k8s-monitoring/docs/examples/remote-config/output.yaml +++ 
b/charts/k8s-monitoring/docs/examples/remote-config/output.yaml @@ -3,12 +3,12 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -19,7 +19,7 @@ metadata: apiVersion: v1 kind: Secret metadata: - name: "alloy-metrics-remote-cfg-ko-k8s-monitoring" + name: "alloy-metrics-remote-cfg-k8smon-k8s-monitoring" namespace: "default" type: Opaque data: @@ -30,7 +30,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics namespace: default data: config.alloy: |- @@ -38,7 +38,7 @@ data: remote.kubernetes.secret "alloy_metrics_remote_cfg" { - name = "alloy-metrics-remote-cfg-ko-k8s-monitoring" + name = "alloy-metrics-remote-cfg-k8smon-k8s-monitoring" namespace = "default" } @@ -61,11 +61,11 @@ data: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -161,11 +161,11 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -174,21 +174,21 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: ko-alloy-metrics + name: k8smon-alloy-metrics subjects: - kind: ServiceAccount - name: ko-alloy-metrics + name: 
k8smon-alloy-metrics namespace: default --- # Source: k8s-monitoring/charts/alloy-metrics/templates/cluster_service.yaml apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics-cluster + name: k8smon-alloy-metrics-cluster labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -200,7 +200,7 @@ spec: publishNotReadyAddresses: true selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon ports: # Do not include the -metrics suffix in the port name, otherwise metrics # can be double-collected with the non-headless Service if it's also @@ -217,11 +217,11 @@ spec: apiVersion: v1 kind: Service metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -231,7 +231,7 @@ spec: type: ClusterIP selector: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon internalTrafficPolicy: Cluster ports: - name: http-metrics @@ -243,11 +243,11 @@ spec: apiVersion: apps/v1 kind: StatefulSet metadata: - name: ko-alloy-metrics + name: k8smon-alloy-metrics labels: helm.sh/chart: alloy-metrics-0.10.0 app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon app.kubernetes.io/version: "v1.5.0" app.kubernetes.io/managed-by: Helm @@ -256,11 +256,11 @@ spec: replicas: 1 podManagementPolicy: Parallel minReadySeconds: 10 - serviceName: ko-alloy-metrics + serviceName: k8smon-alloy-metrics selector: matchLabels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon template: 
metadata: annotations: @@ -268,9 +268,9 @@ spec: k8s.grafana.com/logs.job: integrations/alloy labels: app.kubernetes.io/name: alloy-metrics - app.kubernetes.io/instance: ko + app.kubernetes.io/instance: k8smon spec: - serviceAccountName: ko-alloy-metrics + serviceAccountName: k8smon-alloy-metrics containers: - name: alloy image: docker.io/grafana/alloy:v1.5.0 @@ -282,7 +282,7 @@ spec: - --server.http.listen-addr=0.0.0.0:12345 - --server.http.ui-path-prefix=/ - --cluster.enabled=true - - --cluster.join-addresses=ko-alloy-metrics-cluster + - --cluster.join-addresses=k8smon-alloy-metrics-cluster - --cluster.name="alloy-metrics" - --stability.level=public-preview env: @@ -345,7 +345,7 @@ spec: volumes: - name: config configMap: - name: ko-alloy-metrics + name: k8smon-alloy-metrics --- # Source: k8s-monitoring/templates/remote_config_secret.yaml # create: true diff --git a/charts/k8s-monitoring/templates/features/_feature_helpers.tpl b/charts/k8s-monitoring/templates/features/_feature_helpers.tpl index d8f0e7c2f..faee7a0ec 100644 --- a/charts/k8s-monitoring/templates/features/_feature_helpers.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_helpers.tpl @@ -4,6 +4,7 @@ - autoInstrumentation - clusterMetrics - clusterEvents +- nodeLogs - podLogs - profiling - prometheusOperatorObjects diff --git a/charts/k8s-monitoring/templates/features/_feature_node_logs.tpl b/charts/k8s-monitoring/templates/features/_feature_node_logs.tpl new file mode 100644 index 000000000..9cc954156 --- /dev/null +++ b/charts/k8s-monitoring/templates/features/_feature_node_logs.tpl @@ -0,0 +1,40 @@ +{{- define "features.nodeLogs.enabled" }}{{ .Values.nodeLogs.enabled }}{{- end }} + +{{- define "features.nodeLogs.collectors" }} +{{- if .Values.nodeLogs.enabled -}} +- {{ .Values.nodeLogs.collector }} +{{- end }} +{{- end }} + +{{- define "features.nodeLogs.include" }} +{{- if .Values.nodeLogs.enabled -}} +{{- $destinations := include "features.nodeLogs.destinations" . 
| fromYamlArray }} + +// Feature: Node Logs +{{- include "feature.nodeLogs.module" (dict "Values" .Values.nodeLogs "Files" $.Subcharts.nodeLogs.Files) }} +node_logs "feature" { + logs_destinations = [ + {{ include "destinations.alloy.targets" (dict "destinations" $.Values.destinations "names" $destinations "type" "logs" "ecosystem" "loki") | indent 4 | trim }} + ] +} +{{- end -}} +{{- end -}} + +{{- define "features.nodeLogs.destinations" }} +{{- if .Values.nodeLogs.enabled -}} +{{- include "destinations.get" (dict "destinations" $.Values.destinations "type" "logs" "ecosystem" "loki" "filter" $.Values.nodeLogs.destinations) -}} +{{- end -}} +{{- end -}} + +{{- define "features.nodeLogs.validate" }} +{{- if .Values.nodeLogs.enabled -}} +{{- $featureName := "Kubernetes Node logs" }} +{{- $destinations := include "features.nodeLogs.destinations" . | fromYamlArray }} +{{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "logs" "ecosystem" "loki" "feature" $featureName) }} + +{{- range $collector := include "features.nodeLogs.collectors" . | fromYamlArray }} + {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} + {{- include "feature.nodeLogs.collector.validate" (dict "Values" $.Values.nodeLogs "Collector" (index $.Values $collector) "CollectorName" $collector) }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl b/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl index cdef7193e..d083ce276 100644 --- a/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl +++ b/charts/k8s-monitoring/templates/features/_feature_pod_logs.tpl @@ -34,10 +34,10 @@ pod_logs "feature" { {{- $featureName := "Kubernetes Pod logs" }} {{- $destinations := include "features.podLogs.destinations" . 
| fromYamlArray }} {{- include "destinations.validate_destination_list" (dict "destinations" $destinations "type" "logs" "ecosystem" "loki" "feature" $featureName) }} -{{- include "collectors.require_collector" (dict "Values" $.Values "name" "alloy-logs" "feature" $featureName) }} {{- range $collector := include "features.podLogs.collectors" . | fromYamlArray }} {{- include "collectors.require_collector" (dict "Values" $.Values "name" $collector "feature" $featureName) }} + {{- include "feature.podLogs.collector.validate" (dict "Values" $.Values.nodeLogs "Collector" (index $.Values $collector) "CollectorName" $collector) }} {{- end -}} {{- end -}} {{- end -}} diff --git a/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml b/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml new file mode 100644 index 000000000..3b81e9b06 --- /dev/null +++ b/charts/k8s-monitoring/tests/integration/auth/.rendered/output.yaml @@ -0,0 +1,1850 @@ +--- +# Source: k8s-monitoring/charts/alloy-metrics/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8smon-alloy-metrics + namespace: default + labels: + helm.sh/chart: alloy-metrics-0.10.0 + app.kubernetes.io/name: alloy-metrics + app.kubernetes.io/instance: k8smon + + app.kubernetes.io/version: "v1.5.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: alloy + app.kubernetes.io/component: rbac +--- +# Source: k8s-monitoring/templates/destination_secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: "prometheus-basicauth-k8smon-k8s-monitoring" + namespace: "default" +type: Opaque +data: + username: "cHJvbXVzZXI=" + password: "cHJvbXBhc3N3b3Jk" +--- +# Source: k8s-monitoring/templates/destination_secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: "prometheus-bearer-token-k8smon-k8s-monitoring" + namespace: "default" +type: Opaque +data: + bearerToken: "bXktc2VjcmV0LXJlbW90ZS13cml0ZS10b2tlbg==" +--- +# Source: 
k8s-monitoring/templates/destination_secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: "prometheus-otlp-basicauth-k8smon-k8s-monitoring" + namespace: "default" +type: Opaque +data: + username: "b3RlbHVzZXI=" + password: "b3RlbHBhc3N3b3Jk" +--- +# Source: k8s-monitoring/templates/destination_secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: "prometheus-otlp-bearer-token-k8smon-k8s-monitoring" + namespace: "default" +type: Opaque +data: + bearerToken: "bXktc2VjcmV0LW90bHAtdG9rZW4=" +--- +# Source: k8s-monitoring/templates/alloy-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8smon-alloy-metrics + namespace: default +data: + config.alloy: |- + // Destination: prometheus-noauth (prometheus) + otelcol.exporter.prometheus "prometheus_noauth" { + add_metric_suffixes = true + forward_to = [prometheus.remote_write.prometheus_noauth.receiver] + } + + prometheus.remote_write "prometheus_noauth" { + endpoint { + url = "http://prometheus-server.prometheus.svc:9090/api/v1/write" + headers = { + } + tls_config { + insecure_skip_verify = false + } + send_native_histograms = false + + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + } + + wal { + truncate_frequency = "2h" + min_keepalive_time = "5m" + max_keepalive_time = "8h" + } + external_labels = { + destination = "prometheus-noauth", + } + } + // Destination: prometheus-basicauth (prometheus) + otelcol.exporter.prometheus "prometheus_basicauth" { + add_metric_suffixes = true + forward_to = 
[prometheus.remote_write.prometheus_basicauth.receiver] + } + + prometheus.remote_write "prometheus_basicauth" { + endpoint { + url = "http://nginx-basic-auth.prometheus.svc/auth-gateway/api/v1/write" + headers = { + } + basic_auth { + username = nonsensitive(remote.kubernetes.secret.prometheus_basicauth.data["username"]) + password = remote.kubernetes.secret.prometheus_basicauth.data["password"] + } + tls_config { + insecure_skip_verify = false + } + send_native_histograms = false + + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = "5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + } + + wal { + truncate_frequency = "2h" + min_keepalive_time = "5m" + max_keepalive_time = "8h" + } + external_labels = { + destination = "prometheus-basicauth", + } + } + + remote.kubernetes.secret "prometheus_basicauth" { + name = "prometheus-basicauth-k8smon-k8s-monitoring" + namespace = "default" + } + + // Destination: prometheus-bearer-token (prometheus) + otelcol.exporter.prometheus "prometheus_bearer_token" { + add_metric_suffixes = true + forward_to = [prometheus.remote_write.prometheus_bearer_token.receiver] + } + + prometheus.remote_write "prometheus_bearer_token" { + endpoint { + url = "http://nginx-bearer-token.prometheus.svc/remote-write/api/v1/write" + headers = { + } + bearer_token = remote.kubernetes.secret.prometheus_bearer_token.data["bearerToken"] + tls_config { + insecure_skip_verify = false + } + send_native_histograms = false + + queue_config { + capacity = 10000 + min_shards = 1 + max_shards = 50 + max_samples_per_send = 2000 + batch_send_deadline = 
"5s" + min_backoff = "30ms" + max_backoff = "5s" + retry_on_http_429 = true + sample_age_limit = "0s" + } + + write_relabel_config { + source_labels = ["cluster"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + write_relabel_config { + source_labels = ["k8s.cluster.name"] + regex = "" + replacement = "auth-integration-test" + target_label = "cluster" + } + } + + wal { + truncate_frequency = "2h" + min_keepalive_time = "5m" + max_keepalive_time = "8h" + } + external_labels = { + destination = "prometheus-bearer-token", + } + } + + remote.kubernetes.secret "prometheus_bearer_token" { + name = "prometheus-bearer-token-k8smon-k8s-monitoring" + namespace = "default" + } + + // Destination: prometheus-otlp-noauth (otlp) + otelcol.receiver.prometheus "prometheus_otlp_noauth" { + output { + metrics = [otelcol.processor.attributes.prometheus_otlp_noauth.input] + } + } + + otelcol.processor.attributes "prometheus_otlp_noauth" { + action { + key = "cluster" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "k8s.cluster.name" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "destination" + action = "upsert" + value = "prometheus-otlp-noauth" + } + output { + metrics = [otelcol.processor.transform.prometheus_otlp_noauth.input] + logs = [otelcol.processor.transform.prometheus_otlp_noauth.input] + traces = [otelcol.processor.transform.prometheus_otlp_noauth.input] + } + } + + otelcol.processor.transform "prometheus_otlp_noauth" { + error_mode = "ignore" + + output { + metrics = [otelcol.processor.batch.prometheus_otlp_noauth.input] + } + } + + otelcol.processor.batch "prometheus_otlp_noauth" { + timeout = "2s" + send_batch_size = 8192 + send_batch_max_size = 0 + + output { + metrics = [otelcol.exporter.otlphttp.prometheus_otlp_noauth.input] + } + } + otelcol.exporter.otlphttp "prometheus_otlp_noauth" { + client { + endpoint = "http://prometheus-server.prometheus.svc:9090/api/v1/otlp" + 
tls { + insecure = false + insecure_skip_verify = false + } + } + } + // Destination: prometheus-otlp-basicauth (otlp) + otelcol.receiver.prometheus "prometheus_otlp_basicauth" { + output { + metrics = [otelcol.processor.attributes.prometheus_otlp_basicauth.input] + } + } + otelcol.auth.basic "prometheus_otlp_basicauth" { + username = nonsensitive(remote.kubernetes.secret.prometheus_otlp_basicauth.data["username"]) + password = remote.kubernetes.secret.prometheus_otlp_basicauth.data["password"] + } + + otelcol.processor.attributes "prometheus_otlp_basicauth" { + action { + key = "cluster" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "k8s.cluster.name" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "destination" + action = "upsert" + value = "prometheus-otlp-basicauth" + } + output { + metrics = [otelcol.processor.transform.prometheus_otlp_basicauth.input] + logs = [otelcol.processor.transform.prometheus_otlp_basicauth.input] + traces = [otelcol.processor.transform.prometheus_otlp_basicauth.input] + } + } + + otelcol.processor.transform "prometheus_otlp_basicauth" { + error_mode = "ignore" + + output { + metrics = [otelcol.processor.batch.prometheus_otlp_basicauth.input] + } + } + + otelcol.processor.batch "prometheus_otlp_basicauth" { + timeout = "2s" + send_batch_size = 8192 + send_batch_max_size = 0 + + output { + metrics = [otelcol.exporter.otlphttp.prometheus_otlp_basicauth.input] + } + } + otelcol.exporter.otlphttp "prometheus_otlp_basicauth" { + client { + endpoint = "http://nginx-basic-auth.prometheus.svc/auth-gateway/api/v1/otlp" + auth = otelcol.auth.basic.prometheus_otlp_basicauth.handler + tls { + insecure = false + insecure_skip_verify = false + } + } + } + + remote.kubernetes.secret "prometheus_otlp_basicauth" { + name = "prometheus-otlp-basicauth-k8smon-k8s-monitoring" + namespace = "default" + } + + // Destination: prometheus-otlp-bearer-token (otlp) + otelcol.receiver.prometheus 
"prometheus_otlp_bearer_token" { + output { + metrics = [otelcol.processor.attributes.prometheus_otlp_bearer_token.input] + } + } + otelcol.auth.bearer "prometheus_otlp_bearer_token" { + token = remote.kubernetes.secret.prometheus_otlp_bearer_token.data["bearerToken"] + } + + otelcol.processor.attributes "prometheus_otlp_bearer_token" { + action { + key = "cluster" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "k8s.cluster.name" + action = "upsert" + value = "auth-integration-test" + } + action { + key = "destination" + action = "upsert" + value = "prometheus-otlp-bearer-token" + } + output { + metrics = [otelcol.processor.transform.prometheus_otlp_bearer_token.input] + logs = [otelcol.processor.transform.prometheus_otlp_bearer_token.input] + traces = [otelcol.processor.transform.prometheus_otlp_bearer_token.input] + } + } + + otelcol.processor.transform "prometheus_otlp_bearer_token" { + error_mode = "ignore" + + output { + metrics = [otelcol.processor.batch.prometheus_otlp_bearer_token.input] + } + } + + otelcol.processor.batch "prometheus_otlp_bearer_token" { + timeout = "2s" + send_batch_size = 8192 + send_batch_max_size = 0 + + output { + metrics = [otelcol.exporter.otlphttp.prometheus_otlp_bearer_token.input] + } + } + otelcol.exporter.otlphttp "prometheus_otlp_bearer_token" { + client { + endpoint = "http://nginx-bearer-token.prometheus.svc/otlp/api/v1/otlp" + auth = otelcol.auth.bearer.prometheus_otlp_bearer_token.handler + tls { + insecure = false + insecure_skip_verify = false + } + } + } + + remote.kubernetes.secret "prometheus_otlp_bearer_token" { + name = "prometheus-otlp-bearer-token-k8smon-k8s-monitoring" + namespace = "default" + } + + // Feature: Cluster Metrics + declare "cluster_metrics" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + remote.kubernetes.configmap "kubernetes" { + name = "k8smon-alloy-module-kubernetes" + 
namespace = "default" + } + + import.string "kubernetes" { + content = remote.kubernetes.configmap.kubernetes.data["core_metrics.alloy"] + } + + kubernetes.kubelet "scrape" { + clustering = true + keep_metrics = "up|container_cpu_usage_seconds_total|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_used|kubernetes_build_info|namespace_workload_pod|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes|kubernetes_build_info" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = argument.metrics_destinations.value + } + } + cluster_metrics "feature" { + metrics_destinations = [ + prometheus.remote_write.prometheus_noauth.receiver, + prometheus.remote_write.prometheus_basicauth.receiver, + prometheus.remote_write.prometheus_bearer_token.receiver, + otelcol.receiver.prometheus.prometheus_otlp_noauth.receiver, + otelcol.receiver.prometheus.prometheus_otlp_basicauth.receiver, + otelcol.receiver.prometheus.prometheus_otlp_bearer_token.receiver, + ] + } + + // Self Reporting + prometheus.exporter.unix 
"kubernetes_monitoring_telemetry" { + set_collectors = ["textfile"] + textfile { + directory = "/etc/alloy" + } + } + + discovery.relabel "kubernetes_monitoring_telemetry" { + targets = prometheus.exporter.unix.kubernetes_monitoring_telemetry.targets + rule { + target_label = "instance" + action = "replace" + replacement = "k8smon" + } + rule { + target_label = "job" + action = "replace" + replacement = "integrations/kubernetes/kubernetes_monitoring_telemetry" + } + } + + prometheus.scrape "kubernetes_monitoring_telemetry" { + job_name = "integrations/kubernetes/kubernetes_monitoring_telemetry" + targets = discovery.relabel.kubernetes_monitoring_telemetry.output + scrape_interval = "1h" + clustering { + enabled = true + } + forward_to = [prometheus.relabel.kubernetes_monitoring_telemetry.receiver] + } + + prometheus.relabel "kubernetes_monitoring_telemetry" { + rule { + source_labels = ["__name__"] + regex = "grafana_kubernetes_monitoring_.*" + action = "keep" + } + forward_to = [ + prometheus.remote_write.prometheus_noauth.receiver, + prometheus.remote_write.prometheus_basicauth.receiver, + prometheus.remote_write.prometheus_bearer_token.receiver, + ] + } + + livedebugging { + enabled = true + } + + + self-reporting-metric.prom: | + # HELP grafana_kubernetes_monitoring_build_info A metric to report the version of the Kubernetes Monitoring Helm chart + # TYPE grafana_kubernetes_monitoring_build_info gauge + grafana_kubernetes_monitoring_build_info{version="2.0.0-rc.5", namespace="default"} 1 + # HELP grafana_kubernetes_monitoring_feature_info A metric to report the enabled features of the Kubernetes Monitoring Helm chart + # TYPE grafana_kubernetes_monitoring_feature_info gauge + grafana_kubernetes_monitoring_feature_info{deployments="%!s()", feature="clusterMetrics", sources="kubelet", version="1.0.0"} 1 +--- +# Source: k8s-monitoring/templates/alloy-modules-configmaps.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8smon-alloy-module-kubernetes +data: + 
core_metrics.alloy: | + /* + Module: job-cadvisor + Description: Scrapes cadvisor + + Note: Every argument except for "forward_to" is optional, and does have a defined default value. However, the values for these + arguments are not defined using the default = " ... " argument syntax, but rather using the coalesce(argument.value, " ... "). + This is because if the argument passed in from another consuming module is set to null, the default = " ... " syntax will + does not override the value passed in, where coalesce() will return the first non-null value. + */ + declare "cadvisor" { + argument "forward_to" { + comment = "Must be a list(MetricsReceiver) where collected logs should be forwarded to" + } + argument "field_selectors" { + // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + comment = "The label selectors to use to find matching targets (default: [\"metadata.name=kubernetes\"])" + optional = true + } + argument "label_selectors" { + // Docs: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + comment = "The label selectors to use to find matching targets (default: [])" + optional = true + } + argument "job_label" { + comment = "The job label to add for all cadvisor metric (default: integrations/kubernetes/cadvisor)" + optional = true + } + argument "keep_metrics" { + comment = "A regular expression of metrics to keep (default: see below)" + optional = true + } + argument "drop_metrics" { + comment = "A regular expression of metrics to drop (default: see below)" + optional = true + } + argument "scrape_interval" { + comment = "How often to scrape metrics from the targets (default: 60s)" + optional = true + } + argument "scrape_timeout" { + comment = "How long before a scrape times out (default: 10s)" + optional = true + } + argument "max_cache_size" { + comment = "The maximum number of elements to hold in the relabeling cache (default: 100000). 
This should be at least 2x-5x your largest scrape target or samples appended rate." + optional = true + } + argument "clustering" { + // Docs: https://grafana.com/docs/agent/latest/flow/concepts/clustering/ + comment = "Whether or not clustering should be enabled (default: false)" + optional = true + } + + export "output" { + value = discovery.relabel.cadvisor.output + } + + // cadvisor service discovery for all of the nodes + discovery.kubernetes "cadvisor" { + role = "node" + + selectors { + role = "node" + field = join(coalesce(argument.field_selectors.value, []), ",") + label = join(coalesce(argument.label_selectors.value, []), ",") + } + } + + // cadvisor relabelings (pre-scrape) + discovery.relabel "cadvisor" { + targets = discovery.kubernetes.cadvisor.targets + + // set the address to use the kubernetes service dns name + rule { + target_label = "__address__" + replacement = "kubernetes.default.svc.cluster.local:443" + } + + // set the metrics path to use the proxy path to the nodes cadvisor metrics endpoint + rule { + source_labels = ["__meta_kubernetes_node_name"] + regex = "(.+)" + replacement = "/api/v1/nodes/${1}/proxy/metrics/cadvisor" + target_label = "__metrics_path__" + } + + // set the node label + rule { + source_labels = ["__meta_kubernetes_node_name"] + target_label = "node" + } + + // set the app name if specified as metadata labels "app:" or "app.kubernetes.io/name:" or "k8s-app:" + rule { + action = "replace" + source_labels = [ + "__meta_kubernetes_node_label_app_kubernetes_io_name", + "__meta_kubernetes_node_label_k8s_app", + "__meta_kubernetes_node_label_app", + ] + separator = ";" + regex = "^(?:;*)?([^;]+).*$" + replacement = "$1" + target_label = "app" + } + + // set a source label + rule { + action = "replace" + replacement = "kubernetes" + target_label = "source" + } + } + + // cadvisor scrape job + prometheus.scrape "cadvisor" { + job_name = coalesce(argument.job_label.value, "integrations/kubernetes/cadvisor") + forward_to = 
[prometheus.relabel.cadvisor.receiver] + targets = discovery.relabel.cadvisor.output + scheme = "https" + scrape_interval = coalesce(argument.scrape_interval.value, "60s") + scrape_timeout = coalesce(argument.scrape_timeout.value, "10s") + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + tls_config { + ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify = false + server_name = "kubernetes" + } + + clustering { + enabled = coalesce(argument.clustering.value, false) + } + } + + // cadvisor metric relabelings (post-scrape) + prometheus.relabel "cadvisor" { + forward_to = argument.forward_to.value + max_cache_size = coalesce(argument.max_cache_size.value, 100000) + + // drop metrics that match the drop_metrics regex + rule { + source_labels = ["__name__"] + regex = coalesce(argument.drop_metrics.value, "(^(go|process)_.+$)") + action = "drop" + } + + // keep only metrics that match the keep_metrics regex + rule { + source_labels = ["__name__"] + regex = coalesce(argument.keep_metrics.value, "(up|container_(cpu_(cfs_(periods|throttled_periods)_total|usage_seconds_total)|fs_(reads|writes)(_bytes)?_total|memory_(cache|rss|swap|working_set_bytes)|network_(receive|transmit)_(bytes|packets(_dropped)?_total))|machine_memory_bytes)") + action = "keep" + } + + // Drop empty container labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","container"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*)@" + action = "drop" + } + + // Drop empty image labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","image"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*|container_network_.*)@" + action = "drop" + } + + // Normalizing unimportant labels (not deleting to continue satisfying