diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 602b330edf..0000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-**Is this a BUG REPORT or FEATURE REQUEST?**:
-
-> Uncomment only one, leave it on its own line:
->
-> /kind bug
-> /kind feature
-
-
-**What happened**:
-
-**What you expected to happen**:
-
-**How to reproduce it (as minimally and precisely as possible)**:
-
-
-**Anything else we need to know?**:
-
-**Environment**:
-- Kubernetes version (use `kubectl version`):
-- Kube-state-metrics image version
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000000..bd31c20df2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,27 @@
+---
+name: Bug report
+about: Report a bug encountered while running kube-state-metrics
+title: ''
+labels: kind/bug
+assignees: ''
+
+---
+
+
+
+**What happened**:
+
+**What you expected to happen**:
+
+**How to reproduce it (as minimally and precisely as possible)**:
+
+**Anything else we need to know?**:
+
+**Environment**:
+- kube-state-metrics version:
+- Kubernetes version (use `kubectl version`):
+- Cloud provider or hardware configuration:
+- Other info:
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000..f3fa65f79f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,18 @@
+---
+name: Feature request
+about: Suggest a new feature
+title: ''
+labels: kind/feature
+assignees: ''
+
+---
+
+
+
+**What would you like to be added**:
+
+**Why is this needed**:
+
+**Describe the solution you'd like**
+
+**Additional context**
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ef8463e46e..3496fc1008 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -26,7 +26,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -49,7 +49,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -71,7 +71,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -93,7 +93,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -115,7 +115,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -137,7 +137,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -159,7 +159,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
@@ -181,7 +181,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
- go-version: ^1.14
+ go-version: ^1.15
id: go
- name: Check out code into the Go module directory
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b55250659..1412411496 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+## v2.0.0-alpha.2 / 2020-10-27
+
+* [CHANGE] Migrate ingress and certificates to new stable APIs #1260
+* [CHANGE] Revert "Rework resource metrics" #1278
+To better align with future Kubernetes resource metrics, the changes to resource metrics were reverted; the new metrics are
+kube_pod_container_resource_requests and kube_pod_container_resource_limits.
+* [FEATURE] Added the job failure reason in kube_job_status_failed metric #1214
+* [FEATURE] feat(persistentvolume): claimRef info to labels (kube_persistentvolume_claim_ref) #1244
+* [FEATURE] pod: add gauge for runtimeclass handler (kube_pod_runtimeclass_name_info) #1276
+
## v2.0.0-alpha.1 / 2020-10-06
* [CHANGE] Update go module path to k8s.io/kube-state-metrics/v2 #1238
diff --git a/Dockerfile b/Dockerfile
index a953dbea6e..534cd1dd02 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-ARG GOVERSION=1.14
+ARG GOVERSION=1.15
FROM golang:${GOVERSION} as builder
ARG GOARCH
ENV GOARCH=${GOARCH}
diff --git a/Makefile b/Makefile
index 5e993c3960..8158e744d7 100644
--- a/Makefile
+++ b/Makefile
@@ -3,16 +3,16 @@ TESTENVVAR =
REGISTRY ?= gcr.io/k8s-staging-kube-state-metrics
TAG_PREFIX = v
VERSION = $(shell cat VERSION)
-TAG = $(TAG_PREFIX)$(VERSION)
+TAG ?= $(TAG_PREFIX)$(VERSION)
LATEST_RELEASE_BRANCH := release-$(shell grep -ohE "[0-9]+.[0-9]+" VERSION)
DOCKER_CLI ?= docker
PKGS = $(shell go list ./... | grep -v /vendor/ | grep -v /tests/e2e)
ARCH ?= $(shell go env GOARCH)
-BuildDate = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
-Commit = $(shell git rev-parse --short HEAD)
+BUILD_DATE = $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
+GIT_COMMIT ?= $(shell git rev-parse --short HEAD)
ALL_ARCH = amd64 arm arm64 ppc64le s390x
PKG = k8s.io/kube-state-metrics/v2/pkg
-GO_VERSION = 1.14.7
+GO_VERSION = 1.15.3
IMAGE = $(REGISTRY)/kube-state-metrics
MULTI_ARCH_IMG = $(IMAGE)-$(ARCH)
@@ -54,7 +54,7 @@ doccheck: generate
@echo OK
build-local:
- GOOS=$(shell uname -s | tr A-Z a-z) GOARCH=$(ARCH) CGO_ENABLED=0 go build -ldflags "-s -w -X ${PKG}/version.Release=${TAG} -X ${PKG}/version.Commit=${Commit} -X ${PKG}/version.BuildDate=${BuildDate}" -o kube-state-metrics
+ GOOS=$(shell uname -s | tr A-Z a-z) GOARCH=$(ARCH) CGO_ENABLED=0 go build -ldflags "-s -w -X ${PKG}/version.Release=${TAG} -X ${PKG}/version.Commit=${GIT_COMMIT} -X ${PKG}/version.BuildDate=${BUILD_DATE}" -o kube-state-metrics
build: kube-state-metrics
@@ -99,7 +99,7 @@ do-push-%:
push-multi-arch:
${DOCKER_CLI} manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g")
- @for arch in $(ALL_ARCH); do ${DOCKER_CLI} manifest annotate --arch $${arch} $(IMAGE):$(TAG) $(IMAGE)-$${arch}:${TAG}; done
+ @for arch in $(ALL_ARCH); do ${DOCKER_CLI} manifest annotate --arch $${arch} $(IMAGE):$(TAG) $(IMAGE)-$${arch}:$(TAG); done
${DOCKER_CLI} manifest push --purge $(IMAGE):$(TAG)
quay-push: .quay-push-$(ARCH)
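The Makefile now declares `TAG` and `GIT_COMMIT` with `?=` (override-if-unset), which is what allows the cloudbuild.yaml change further down to pass `TAG=$_PULL_BASE_REF` and `GIT_COMMIT=$_PULL_BASE_SHA` from CI, and it renames `BuildDate`/`Commit` to `BUILD_DATE`/`GIT_COMMIT`. These values are injected at link time into the version package through the `-X` flags in `build-local`. The version package itself is not part of this patch; a minimal sketch of the shape those `-ldflags` assume (only the symbol names `Release`, `Commit`, and `BuildDate` are taken from the Makefile, the rest is hypothetical):

```go
// Hypothetical sketch of the package targeted by the -ldflags "-X" flags above
// (k8s.io/kube-state-metrics/v2/pkg/version). Not part of this patch.
package version

import "fmt"

// These defaults are overwritten at link time, e.g.:
//   go build -ldflags "-X ${PKG}/version.Release=${TAG} -X ${PKG}/version.Commit=${GIT_COMMIT} -X ${PKG}/version.BuildDate=${BUILD_DATE}"
var (
	Release   = "UNKNOWN"
	Commit    = "UNKNOWN"
	BuildDate = "UNKNOWN"
)

// Version returns a human-readable version string built from the injected values.
func Version() string {
	return fmt.Sprintf("kube-state-metrics %s (commit %s, built %s)", Release, Commit, BuildDate)
}
```

With `TAG ?=`, an ad-hoc build can override the version string, e.g. `make build-local TAG=v2.0.0-alpha.2`, while local builds keep deriving it from the VERSION file.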
diff --git a/README.md b/README.md
index c78a36b492..db6339f773 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,8 @@ the raw metrics. Note that the metrics exposed on the `/metrics` endpoint
reflect the current state of the Kubernetes cluster. When Kubernetes objects
are deleted they are no longer visible on the `/metrics` endpoint.
+Note that any new features will be merged into master but only released with the v2.1.0 release, as v2.0.0 is currently past feature freeze and only accepting bug fixes.
+
## Table of Contents
- [Versioning](#versioning)
@@ -61,18 +63,22 @@ The compatibility matrix for client-go and Kubernetes cluster can be found
All additional compatibility is only best effort, or happens to still/already be supported.
#### Compatibility matrix
+
At most, 5 kube-state-metrics and 5 [kubernetes releases](https://github.com/kubernetes/kubernetes/releases) will be recorded below.
| kube-state-metrics | **Kubernetes 1.15** | **Kubernetes 1.16** | **Kubernetes 1.17** | **Kubernetes 1.18** | **Kubernetes 1.19** |
|--------------------|---------------------|---------------------|----------------------|----------------------|----------------------|
| **v1.8.0** | ✓ | - | - | - | - |
| **v1.9.7** | - | ✓ | - | - | - |
-| **v2.0.0-alpha.1** | - | - | ✓ | ✓ | ✓ |
-| **master** | - | - | ✓ | ✓ | ✓ |
+| **v2.0.0-alpha.2** | - | - | -/✓ | -/✓ | ✓ |
+| **master** | - | - | -/✓ | -/✓ | ✓ |
- `✓` Fully supported version range.
- `-` The Kubernetes cluster has features the client-go library can't use (additional API objects, deprecated APIs, etc).
+**Note:** The `v2.0.0-alpha.2+` and `master` releases of kube-state-metrics work on Kubernetes v1.17 and v1.18, excluding Ingress and CertificateSigningRequest resource metrics. If you require those metrics on an older Kubernetes version, use the v2.0.0-alpha.1 or v1.9.7 release of kube-state-metrics.
+
#### Resource group version compatibility
+
Resources in Kubernetes can evolve, i.e., the group version for a resource may change from alpha to beta and finally GA
in different Kubernetes versions. For now, kube-state-metrics will only use the oldest API available in the latest
release.
@@ -80,10 +86,10 @@ release.
#### Container Image
The latest container image can be found at:
-* `quay.io/coreos/kube-state-metrics:v2.0.0-alpha.1`
-* `k8s.gcr.io/kube-state-metrics/kube-state-metrics:2.0.0-alpha.1`
-* `k8s.gcr.io/kube-state-metrics/kube-state-metrics-arm:2.0.0-alpha.1`
-* `k8s.gcr.io/kube-state-metrics/kube-state-metrics-arm64:2.0.0-alpha.1`
+* `quay.io/coreos/kube-state-metrics:v2.0.0-alpha.2`
+* `k8s.gcr.io/kube-state-metrics/kube-state-metrics:2.0.0-alpha.2`
+* `k8s.gcr.io/kube-state-metrics/kube-state-metrics-arm:2.0.0-alpha.2`
+* `k8s.gcr.io/kube-state-metrics/kube-state-metrics-arm64:2.0.0-alpha.2`
### Metrics Documentation
@@ -184,14 +190,14 @@ It is a cluster level component which periodically scrapes metrics from all
Kubernetes nodes served by Kubelet through Summary API. The metrics are
aggregated, stored in memory and served in [Metrics API
format](https://git.k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go). The
-metric-server stores the latest values only and is not responsible for
+metrics-server stores the latest values only and is not responsible for
forwarding metrics to third-party destinations.
kube-state-metrics is focused on generating completely new metrics from
Kubernetes' object state (e.g. metrics based on deployments, replica sets,
etc.). It holds an entire snapshot of Kubernetes state in memory and
continuously generates new metrics based off of it. And just like the
-metric-server it too is not responsibile for exporting its metrics anywhere.
+metrics-server it too is not responsible for exporting its metrics anywhere.
Having kube-state-metrics as a separate project also enables access to these
metrics from monitoring systems such as Prometheus.
@@ -283,7 +289,7 @@ subjects:
namespace: your-namespace-where-kube-state-metrics-will-deployed
```
-- then specify a set of namespaces (using the `--namespace` option) and a set of kubernetes objects (using the `--resources`) that your serviceaccount has access to in the `kube-state-metrics` deployment configuration
+- then specify a set of namespaces (using the `--namespaces` option) and a set of kubernetes objects (using the `--resources`) that your serviceaccount has access to in the `kube-state-metrics` deployment configuration
```yaml
spec:
@@ -293,7 +299,7 @@ spec:
- name: kube-state-metrics
args:
- '--resources=pods'
- - '--namespace=project1'
+ - '--namespaces=project1'
```
For the full list of arguments available, see the documentation in [docs/cli-arguments.md](./docs/cli-arguments.md)
diff --git a/VERSION b/VERSION
index e05cd46980..453787a2fb 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0.0-alpha.1
+2.0.0-alpha.2
diff --git a/cloudbuild.yaml b/cloudbuild.yaml
index 27d9dbed7e..c0ad68388d 100644
--- a/cloudbuild.yaml
+++ b/cloudbuild.yaml
@@ -6,7 +6,7 @@ steps:
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
entrypoint: make
env:
- - GIT_TAG=$_PULL_BASE_REF
+ - TAG=$_PULL_BASE_REF
- GIT_COMMIT=$_PULL_BASE_SHA
args:
- push
diff --git a/docs/job-metrics.md b/docs/job-metrics.md
index 66377dff17..d0ec58ce60 100644
--- a/docs/job-metrics.md
+++ b/docs/job-metrics.md
@@ -10,7 +10,7 @@
| kube_job_spec_active_deadline_seconds | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
| kube_job_status_active | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
| kube_job_status_succeeded | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
-| kube_job_status_failed | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
+| kube_job_status_failed | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; <br> `reason`=&lt;failure reason&gt; | STABLE |
| kube_job_status_start_time | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
| kube_job_status_completion_time | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; | STABLE |
| kube_job_complete | Gauge | `job_name`=&lt;job-name&gt; <br> `namespace`=&lt;job-namespace&gt; <br> `condition`=&lt;true\|false\|unknown&gt; | STABLE |
diff --git a/docs/persistentvolume-metrics.md b/docs/persistentvolume-metrics.md
index e46748f296..910584bd02 100644
--- a/docs/persistentvolume-metrics.md
+++ b/docs/persistentvolume-metrics.md
@@ -4,6 +4,7 @@
| ---------- | ----------- | ----------- | ----------- |
| kube_persistentvolume_capacity_bytes | Gauge | `persistentvolume`=<pv-name> | STABLE |
| kube_persistentvolume_status_phase | Gauge | `persistentvolume`=&lt;pv-name&gt; <br> `phase`=&lt;Bound\|Failed\|Pending\|Available\|Released&gt; | STABLE |
+| kube_persistentvolume_claim_ref | Gauge | `persistentvolume`=&lt;pv-name&gt; <br> `claim_namespace`=&lt;claim-namespace&gt; <br> `name`=&lt;claim-name&gt; | STABLE |
| kube_persistentvolume_labels | Gauge | `persistentvolume`=&lt;persistentvolume-name&gt; <br> `label_PERSISTENTVOLUME_LABEL`=&lt;PERSISTENTVOLUME_LABEL&gt; | STABLE |
| kube_persistentvolume_info | Gauge | `persistentvolume`=&lt;pv-name&gt; <br> `storageclass`=&lt;storageclass-name&gt; <br> `gce_persistent_disk_name`=&lt;pd-name&gt; <br> `ebs_volume_id`=&lt;ebs-volume-id&gt; <br> `fc_wwids`=&lt;fc-wwids-comma-separated&gt; <br> `fc_lun`=&lt;fc-lun&gt; <br> `fc_target_wwns`=&lt;fc-target-wwns-comma-separated&gt; <br> `iscsi_target_portal`=&lt;iscsi-target-portal&gt; <br> `iscsi_iqn`=&lt;iscsi-iqn&gt; <br> `iscsi_lun`=&lt;iscsi-lun&gt; <br> `iscsi_initiator_name`=&lt;iscsi-initiator-name&gt; <br> `nfs_server`=&lt;nfs-server&gt; <br> `nfs_path`=&lt;nfs-path&gt; | STABLE |
diff --git a/docs/pod-metrics.md b/docs/pod-metrics.md
index 1127e381de..cb88fa369f 100644
--- a/docs/pod-metrics.md
+++ b/docs/pod-metrics.md
@@ -20,18 +20,11 @@
| kube_pod_container_status_last_terminated_reason | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; <br> `reason`=&lt;OOMKilled\|Error\|Completed\|ContainerCannotRun\|DeadlineExceeded&gt; | STABLE |
| kube_pod_container_status_ready | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | STABLE |
| kube_pod_container_status_restarts_total | Counter | `container`=&lt;container-name&gt; <br> `namespace`=&lt;pod-namespace&gt; <br> `pod`=&lt;pod-name&gt; | STABLE |
-| kube_pod_container_resource_requests | Gauge | `resource`=&lt;resource-name&gt; <br> `unit`=&lt;resource-unit&gt; <br> `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_requests_cpu_cores | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_requests_memory_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_requests_storage_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_requests_ephemeral_storage_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
+| kube_pod_container_resource_requests | Gauge | `resource`=&lt;resource-name&gt; <br> `unit`=&lt;resource-unit&gt; <br> `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; <br> `node`=&lt;node-name&gt; | EXPERIMENTAL |
| kube_pod_container_resource_limits | Gauge | `resource`=&lt;resource-name&gt; <br> `unit`=&lt;resource-unit&gt; <br> `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_limits_cpu_cores | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_limits_memory_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_limits_storage_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
-| kube_pod_container_resource_limits_ephemeral_storage_bytes | Gauge | `container`=&lt;container-name&gt; <br> `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
| kube_pod_overhead_cpu_cores | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
| kube_pod_overhead_memory_bytes | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
+| kube_pod_runtimeclass_name_info | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; <br> `runtimeclass_name`=&lt;runtimeclass-name&gt; | EXPERIMENTAL |
| kube_pod_created | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | STABLE |
| kube_pod_deletion_timestamp | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; | EXPERIMENTAL |
| kube_pod_restart_policy | Gauge | `pod`=&lt;pod-name&gt; <br> `namespace`=&lt;pod-namespace&gt; <br> `type`=&lt;Always\|Never\|OnFailure&gt; | STABLE |
diff --git a/docs/service-metrics.md b/docs/service-metrics.md
index 822144113f..30c1ab00a0 100644
--- a/docs/service-metrics.md
+++ b/docs/service-metrics.md
@@ -2,7 +2,7 @@
| Metric name| Metric type | Labels/tags | Status |
| ---------- | ----------- | ----------- | ----------- |
-| kube_service_info | Gauge | `service`=&lt;service-name&gt; <br> `namespace`=&lt;service-namespace&gt; <br> `cluster_ip`=&lt;service cluster ip&gt; <br> `external_name`=&lt;service external name&gt; `load_balancer_ip`=&lt;service load balancer ip&gt; | STABLE |
+| kube_service_info | Gauge | `service`=&lt;service-name&gt; <br> `namespace`=&lt;service-namespace&gt; <br> `cluster_ip`=&lt;service cluster ip&gt; <br> `external_name`=&lt;service external name&gt; <br> `load_balancer_ip`=&lt;service load balancer ip&gt; | STABLE |
| kube_service_labels | Gauge | `service`=&lt;service-name&gt; <br> `namespace`=&lt;service-namespace&gt; <br> `label_SERVICE_LABEL`=&lt;SERVICE_LABEL&gt; | STABLE |
| kube_service_created | Gauge | `service`=&lt;service-name&gt; <br> `namespace`=&lt;service-namespace&gt; | STABLE |
| kube_service_spec_type | Gauge | `service`=&lt;service-name&gt; <br> `namespace`=&lt;service-namespace&gt; <br> `type`=&lt;ClusterIP\|NodePort\|LoadBalancer\|ExternalName&gt; | STABLE |
diff --git a/examples/autosharding/cluster-role-binding.yaml b/examples/autosharding/cluster-role-binding.yaml
index 078dda4f9e..2e272fd541 100644
--- a/examples/autosharding/cluster-role-binding.yaml
+++ b/examples/autosharding/cluster-role-binding.yaml
@@ -3,7 +3,7 @@ kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
diff --git a/examples/autosharding/cluster-role.yaml b/examples/autosharding/cluster-role.yaml
index 88f69b474f..a165754100 100644
--- a/examples/autosharding/cluster-role.yaml
+++ b/examples/autosharding/cluster-role.yaml
@@ -3,7 +3,7 @@ kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
rules:
- apiGroups:
@@ -30,7 +30,6 @@ rules:
- daemonsets
- deployments
- replicasets
- - ingresses
verbs:
- list
- watch
@@ -105,6 +104,7 @@ rules:
- networking.k8s.io
resources:
- networkpolicies
+ - ingresses
verbs:
- list
- watch
diff --git a/examples/autosharding/role-binding.yaml b/examples/autosharding/role-binding.yaml
index 229a41e444..2970db880a 100644
--- a/examples/autosharding/role-binding.yaml
+++ b/examples/autosharding/role-binding.yaml
@@ -3,8 +3,9 @@ kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
+ namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
diff --git a/examples/autosharding/role.yaml b/examples/autosharding/role.yaml
index cb4385195a..ddb997dbbd 100644
--- a/examples/autosharding/role.yaml
+++ b/examples/autosharding/role.yaml
@@ -3,7 +3,7 @@ kind: Role
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
rules:
diff --git a/examples/autosharding/service-account.yaml b/examples/autosharding/service-account.yaml
index f25780f832..df94eb06c1 100644
--- a/examples/autosharding/service-account.yaml
+++ b/examples/autosharding/service-account.yaml
@@ -3,6 +3,6 @@ kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
diff --git a/examples/autosharding/service.yaml b/examples/autosharding/service.yaml
index d3974fc1b8..bc7767090d 100644
--- a/examples/autosharding/service.yaml
+++ b/examples/autosharding/service.yaml
@@ -3,7 +3,7 @@ kind: Service
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
spec:
diff --git a/examples/autosharding/statefulset.yaml b/examples/autosharding/statefulset.yaml
index e1c459febf..4ae62e14bb 100644
--- a/examples/autosharding/statefulset.yaml
+++ b/examples/autosharding/statefulset.yaml
@@ -3,7 +3,7 @@ kind: StatefulSet
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
spec:
@@ -16,7 +16,7 @@ spec:
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha
+ app.kubernetes.io/version: 2.0.0-alpha.2
spec:
containers:
- args:
@@ -24,16 +24,14 @@ spec:
- --pod-namespace=$(POD_NAMESPACE)
env:
- name: POD_NAME
- value: ""
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
- value: ""
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- image: quay.io/coreos/kube-state-metrics:v2.0.0-alpha
+ image: quay.io/coreos/kube-state-metrics:v2.0.0-alpha.2
livenessProbe:
httpGet:
path: /healthz
@@ -57,4 +55,3 @@ spec:
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: kube-state-metrics
- volumeClaimTemplates: []
diff --git a/examples/standard/cluster-role-binding.yaml b/examples/standard/cluster-role-binding.yaml
index 5b3145cbee..2e272fd541 100644
--- a/examples/standard/cluster-role-binding.yaml
+++ b/examples/standard/cluster-role-binding.yaml
@@ -3,7 +3,7 @@ kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
diff --git a/examples/standard/cluster-role.yaml b/examples/standard/cluster-role.yaml
index e5e72c766b..a165754100 100644
--- a/examples/standard/cluster-role.yaml
+++ b/examples/standard/cluster-role.yaml
@@ -3,7 +3,7 @@ kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
rules:
- apiGroups:
@@ -30,7 +30,6 @@ rules:
- daemonsets
- deployments
- replicasets
- - ingresses
verbs:
- list
- watch
@@ -105,6 +104,7 @@ rules:
- networking.k8s.io
resources:
- networkpolicies
+ - ingresses
verbs:
- list
- watch
diff --git a/examples/standard/deployment.yaml b/examples/standard/deployment.yaml
index 877b961e8e..0b7d6d5964 100644
--- a/examples/standard/deployment.yaml
+++ b/examples/standard/deployment.yaml
@@ -3,7 +3,7 @@ kind: Deployment
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
spec:
@@ -15,10 +15,10 @@ spec:
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
spec:
containers:
- - image: quay.io/coreos/kube-state-metrics:v2.0.0-alpha.1
+ - image: quay.io/coreos/kube-state-metrics:v2.0.0-alpha.2
livenessProbe:
httpGet:
path: /healthz
diff --git a/examples/standard/service-account.yaml b/examples/standard/service-account.yaml
index b212f6185f..df94eb06c1 100644
--- a/examples/standard/service-account.yaml
+++ b/examples/standard/service-account.yaml
@@ -3,6 +3,6 @@ kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
diff --git a/examples/standard/service.yaml b/examples/standard/service.yaml
index 659063d6f4..bc7767090d 100644
--- a/examples/standard/service.yaml
+++ b/examples/standard/service.yaml
@@ -3,7 +3,7 @@ kind: Service
metadata:
labels:
app.kubernetes.io/name: kube-state-metrics
- app.kubernetes.io/version: 2.0.0-alpha.1
+ app.kubernetes.io/version: 2.0.0-alpha.2
name: kube-state-metrics
namespace: kube-system
spec:
diff --git a/internal/store/builder.go b/internal/store/builder.go
index 6c0ca94807..a6129892cc 100644
--- a/internal/store/builder.go
+++ b/internal/store/builder.go
@@ -29,10 +29,9 @@ import (
autoscaling "k8s.io/api/autoscaling/v2beta1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
- certv1beta1 "k8s.io/api/certificates/v1beta1"
+ certv1 "k8s.io/api/certificates/v1"
coordinationv1 "k8s.io/api/coordination/v1"
v1 "k8s.io/api/core/v1"
- extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
policy "k8s.io/api/policy/v1beta1"
storagev1 "k8s.io/api/storage/v1"
@@ -235,7 +234,7 @@ func (b *Builder) buildHPAStore() cache.Store {
}
func (b *Builder) buildIngressStore() cache.Store {
- return b.buildStoreFunc(ingressMetricFamilies, &extensions.Ingress{}, createIngressListWatch)
+ return b.buildStoreFunc(ingressMetricFamilies, &networkingv1.Ingress{}, createIngressListWatch)
}
func (b *Builder) buildJobStore() cache.Store {
@@ -307,7 +306,7 @@ func (b *Builder) buildPodStore() cache.Store {
}
func (b *Builder) buildCsrStore() cache.Store {
- return b.buildStoreFunc(csrMetricFamilies, &certv1beta1.CertificateSigningRequest{}, createCSRListWatch)
+ return b.buildStoreFunc(csrMetricFamilies, &certv1.CertificateSigningRequest{}, createCSRListWatch)
}
func (b *Builder) buildValidatingWebhookConfigurationStore() cache.Store {
diff --git a/internal/store/certificatesigningrequest.go b/internal/store/certificatesigningrequest.go
index f12fb5a6f4..8e86a41880 100644
--- a/internal/store/certificatesigningrequest.go
+++ b/internal/store/certificatesigningrequest.go
@@ -22,7 +22,7 @@ import (
"k8s.io/kube-state-metrics/v2/pkg/metric"
generator "k8s.io/kube-state-metrics/v2/pkg/metric_generator"
- certv1beta1 "k8s.io/api/certificates/v1beta1"
+ certv1 "k8s.io/api/certificates/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
@@ -41,7 +41,7 @@ var (
descCSRLabelsHelp,
metric.Gauge,
"",
- wrapCSRFunc(func(j *certv1beta1.CertificateSigningRequest) *metric.Family {
+ wrapCSRFunc(func(j *certv1.CertificateSigningRequest) *metric.Family {
labelKeys, labelValues := kubeLabelsToPrometheusLabels(j.Labels)
return &metric.Family{
Metrics: []*metric.Metric{
@@ -59,7 +59,7 @@ var (
"Unix creation timestamp",
metric.Gauge,
"",
- wrapCSRFunc(func(csr *certv1beta1.CertificateSigningRequest) *metric.Family {
+ wrapCSRFunc(func(csr *certv1.CertificateSigningRequest) *metric.Family {
ms := []*metric.Metric{}
if !csr.CreationTimestamp.IsZero() {
ms = append(ms, &metric.Metric{
@@ -79,7 +79,7 @@ var (
"The number of each certificatesigningrequest condition",
metric.Gauge,
"",
- wrapCSRFunc(func(csr *certv1beta1.CertificateSigningRequest) *metric.Family {
+ wrapCSRFunc(func(csr *certv1.CertificateSigningRequest) *metric.Family {
return &metric.Family{
Metrics: addCSRConditionMetrics(csr.Status),
}
@@ -90,7 +90,7 @@ var (
"Length of the issued cert",
metric.Gauge,
"",
- wrapCSRFunc(func(csr *certv1beta1.CertificateSigningRequest) *metric.Family {
+ wrapCSRFunc(func(csr *certv1.CertificateSigningRequest) *metric.Family {
return &metric.Family{
Metrics: []*metric.Metric{
{
@@ -105,10 +105,9 @@ var (
}
)
-func wrapCSRFunc(f func(*certv1beta1.CertificateSigningRequest) *metric.Family) func(interface{}) *metric.Family {
+func wrapCSRFunc(f func(*certv1.CertificateSigningRequest) *metric.Family) func(interface{}) *metric.Family {
return func(obj interface{}) *metric.Family {
- csr := obj.(*certv1beta1.CertificateSigningRequest)
-
+ csr := obj.(*certv1.CertificateSigningRequest)
metricFamily := f(csr)
for _, m := range metricFamily.Metrics {
@@ -123,23 +122,23 @@ func wrapCSRFunc(f func(*certv1beta1.CertificateSigningRequest) *metric.Family)
func createCSRListWatch(kubeClient clientset.Interface, ns string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
- return kubeClient.CertificatesV1beta1().CertificateSigningRequests().List(context.TODO(), opts)
+ return kubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), opts)
},
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
- return kubeClient.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), opts)
+ return kubeClient.CertificatesV1().CertificateSigningRequests().Watch(context.TODO(), opts)
},
}
}
// addCSRConditionMetrics generates one metric for each possible csr condition status
-func addCSRConditionMetrics(cs certv1beta1.CertificateSigningRequestStatus) []*metric.Metric {
+func addCSRConditionMetrics(cs certv1.CertificateSigningRequestStatus) []*metric.Metric {
cApproved := 0
cDenied := 0
for _, s := range cs.Conditions {
- if s.Type == certv1beta1.CertificateApproved {
+ if s.Type == certv1.CertificateApproved {
cApproved++
}
- if s.Type == certv1beta1.CertificateDenied {
+ if s.Type == certv1.CertificateDenied {
cDenied++
}
}
diff --git a/internal/store/certificatesigningrequest_test.go b/internal/store/certificatesigningrequest_test.go
index c093aaf5ea..d041350880 100644
--- a/internal/store/certificatesigningrequest_test.go
+++ b/internal/store/certificatesigningrequest_test.go
@@ -20,7 +20,7 @@ import (
"testing"
"time"
- certv1beta1 "k8s.io/api/certificates/v1beta1"
+ certv1 "k8s.io/api/certificates/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
generator "k8s.io/kube-state-metrics/v2/pkg/metric_generator"
@@ -39,7 +39,7 @@ func TestCsrStore(t *testing.T) {
`
cases := []generateMetricsTestCase{
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -48,8 +48,8 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{},
- Spec: certv1beta1.CertificateSigningRequestSpec{},
+ Status: certv1.CertificateSigningRequestStatus{},
+ Spec: certv1.CertificateSigningRequestSpec{},
},
Want: metadata + `
kube_certificatesigningrequest_created{certificatesigningrequest="certificate-test"} 1.5e+09
@@ -61,7 +61,7 @@ func TestCsrStore(t *testing.T) {
MetricNames: []string{"kube_certificatesigningrequest_created", "kube_certificatesigningrequest_condition", "kube_certificatesigningrequest_labels", "kube_certificatesigningrequest_cert_length"},
},
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -70,14 +70,14 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{
- Conditions: []certv1beta1.CertificateSigningRequestCondition{
+ Status: certv1.CertificateSigningRequestStatus{
+ Conditions: []certv1.CertificateSigningRequestCondition{
{
- Type: certv1beta1.CertificateDenied,
+ Type: certv1.CertificateDenied,
},
},
},
- Spec: certv1beta1.CertificateSigningRequestSpec{},
+ Spec: certv1.CertificateSigningRequestSpec{},
},
Want: metadata + `
kube_certificatesigningrequest_created{certificatesigningrequest="certificate-test"} 1.5e+09
@@ -89,7 +89,7 @@ func TestCsrStore(t *testing.T) {
MetricNames: []string{"kube_certificatesigningrequest_created", "kube_certificatesigningrequest_condition", "kube_certificatesigningrequest_labels", "kube_certificatesigningrequest_cert_length"},
},
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -98,14 +98,14 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{
- Conditions: []certv1beta1.CertificateSigningRequestCondition{
+ Status: certv1.CertificateSigningRequestStatus{
+ Conditions: []certv1.CertificateSigningRequestCondition{
{
- Type: certv1beta1.CertificateApproved,
+ Type: certv1.CertificateApproved,
},
},
},
- Spec: certv1beta1.CertificateSigningRequestSpec{},
+ Spec: certv1.CertificateSigningRequestSpec{},
},
Want: metadata + `
kube_certificatesigningrequest_created{certificatesigningrequest="certificate-test"} 1.5e+09
@@ -117,7 +117,7 @@ func TestCsrStore(t *testing.T) {
MetricNames: []string{"kube_certificatesigningrequest_created", "kube_certificatesigningrequest_condition", "kube_certificatesigningrequest_labels", "kube_certificatesigningrequest_cert_length"},
},
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -126,11 +126,11 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{
+ Status: certv1.CertificateSigningRequestStatus{
Certificate: []byte("just for test"),
- Conditions: []certv1beta1.CertificateSigningRequestCondition{
+ Conditions: []certv1.CertificateSigningRequestCondition{
{
- Type: certv1beta1.CertificateApproved,
+ Type: certv1.CertificateApproved,
},
},
},
@@ -145,7 +145,7 @@ func TestCsrStore(t *testing.T) {
MetricNames: []string{"kube_certificatesigningrequest_created", "kube_certificatesigningrequest_condition", "kube_certificatesigningrequest_labels", "kube_certificatesigningrequest_cert_length"},
},
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -154,13 +154,13 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{
- Conditions: []certv1beta1.CertificateSigningRequestCondition{
+ Status: certv1.CertificateSigningRequestStatus{
+ Conditions: []certv1.CertificateSigningRequestCondition{
{
- Type: certv1beta1.CertificateApproved,
+ Type: certv1.CertificateApproved,
},
{
- Type: certv1beta1.CertificateDenied,
+ Type: certv1.CertificateDenied,
},
},
},
@@ -175,7 +175,7 @@ func TestCsrStore(t *testing.T) {
MetricNames: []string{"kube_certificatesigningrequest_created", "kube_certificatesigningrequest_condition", "kube_certificatesigningrequest_labels", "kube_certificatesigningrequest_cert_length"},
},
{
- Obj: &certv1beta1.CertificateSigningRequest{
+ Obj: &certv1.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "certificate-test",
Generation: 1,
@@ -184,19 +184,19 @@ func TestCsrStore(t *testing.T) {
},
CreationTimestamp: metav1.Time{Time: time.Unix(1500000000, 0)},
},
- Status: certv1beta1.CertificateSigningRequestStatus{
- Conditions: []certv1beta1.CertificateSigningRequestCondition{
+ Status: certv1.CertificateSigningRequestStatus{
+ Conditions: []certv1.CertificateSigningRequestCondition{
{
- Type: certv1beta1.CertificateApproved,
+ Type: certv1.CertificateApproved,
},
{
- Type: certv1beta1.CertificateDenied,
+ Type: certv1.CertificateDenied,
},
{
- Type: certv1beta1.CertificateApproved,
+ Type: certv1.CertificateApproved,
},
{
- Type: certv1beta1.CertificateDenied,
+ Type: certv1.CertificateDenied,
},
},
},
diff --git a/internal/store/ingress.go b/internal/store/ingress.go
index 240712df97..d8e52e563c 100644
--- a/internal/store/ingress.go
+++ b/internal/store/ingress.go
@@ -18,12 +18,12 @@ package store
import (
"context"
+ "strconv"
"k8s.io/kube-state-metrics/v2/pkg/metric"
generator "k8s.io/kube-state-metrics/v2/pkg/metric_generator"
- "k8s.io/api/extensions/v1beta1"
-
+ networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
@@ -42,7 +42,7 @@ var (
"Information about ingress.",
metric.Gauge,
"",
- wrapIngressFunc(func(s *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(s *networkingv1.Ingress) *metric.Family {
return &metric.Family{
Metrics: []*metric.Metric{
{
@@ -56,7 +56,7 @@ var (
descIngressLabelsHelp,
metric.Gauge,
"",
- wrapIngressFunc(func(i *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
labelKeys, labelValues := kubeLabelsToPrometheusLabels(i.Labels)
return &metric.Family{
Metrics: []*metric.Metric{
@@ -74,7 +74,7 @@ var (
"Unix creation timestamp",
metric.Gauge,
"",
- wrapIngressFunc(func(i *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
ms := []*metric.Metric{}
if !i.CreationTimestamp.IsZero() {
@@ -93,7 +93,7 @@ var (
"Resource version representing a specific version of ingress.",
metric.Gauge,
"",
- wrapIngressFunc(func(i *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
return &metric.Family{
Metrics: resourceVersionMetric(i.ObjectMeta.ResourceVersion),
}
@@ -104,14 +104,14 @@ var (
"Ingress host, paths and backend service information.",
metric.Gauge,
"",
- wrapIngressFunc(func(i *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
ms := []*metric.Metric{}
for _, rule := range i.Spec.Rules {
if rule.HTTP != nil {
for _, path := range rule.HTTP.Paths {
ms = append(ms, &metric.Metric{
LabelKeys: []string{"host", "path", "service_name", "service_port"},
- LabelValues: []string{rule.Host, path.Path, path.Backend.ServiceName, path.Backend.ServicePort.String()},
+ LabelValues: []string{rule.Host, path.Path, path.Backend.Service.Name, strconv.Itoa(int(path.Backend.Service.Port.Number))},
Value: 1,
})
}
@@ -127,7 +127,7 @@ var (
"Ingress TLS host and secret information.",
metric.Gauge,
"",
- wrapIngressFunc(func(i *v1beta1.Ingress) *metric.Family {
+ wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
ms := []*metric.Metric{}
for _, tls := range i.Spec.TLS {
for _, host := range tls.Hosts {
@@ -146,9 +146,9 @@ var (
}
)
-func wrapIngressFunc(f func(*v1beta1.Ingress) *metric.Family) func(interface{}) *metric.Family {
+func wrapIngressFunc(f func(*networkingv1.Ingress) *metric.Family) func(interface{}) *metric.Family {
return func(obj interface{}) *metric.Family {
- ingress := obj.(*v1beta1.Ingress)
+ ingress := obj.(*networkingv1.Ingress)
metricFamily := f(ingress)
@@ -164,10 +164,10 @@ func wrapIngressFunc(f func(*v1beta1.Ingress) *metric.Family) func(interface{})
func createIngressListWatch(kubeClient clientset.Interface, ns string) cache.ListerWatcher {
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
- return kubeClient.ExtensionsV1beta1().Ingresses(ns).List(context.TODO(), opts)
+ return kubeClient.NetworkingV1().Ingresses(ns).List(context.TODO(), opts)
},
WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
- return kubeClient.ExtensionsV1beta1().Ingresses(ns).Watch(context.TODO(), opts)
+ return kubeClient.NetworkingV1().Ingresses(ns).Watch(context.TODO(), opts)
},
}
}
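The ingress store now uses the stable networking.k8s.io/v1 API, where a path backend is addressed through `path.Backend.Service.Name` and `path.Backend.Service.Port.Number` instead of the old `ServiceName`/`ServicePort` fields. In networking/v1 an `IngressBackend` carries either a `Service` or a `Resource` reference, so `Backend.Service` can be nil and the port may be given by name rather than number; the generator above assumes a numeric service backend. A defensive variant, as a sketch only (`backendLabels` is a hypothetical helper, not part of this change):

```go
package store

import (
	"strconv"

	networkingv1 "k8s.io/api/networking/v1"
)

// backendLabels is a hypothetical helper showing how a networking/v1 HTTP path
// backend can be read defensively: an IngressBackend holds either Service or
// Resource, so Backend.Service may be nil, and the service port may be set by
// number or by name.
func backendLabels(path networkingv1.HTTPIngressPath) (service, port string) {
	if svc := path.Backend.Service; svc != nil {
		service = svc.Name
		if svc.Port.Number != 0 {
			port = strconv.Itoa(int(svc.Port.Number))
		} else {
			port = svc.Port.Name
		}
	}
	return service, port
}
```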
diff --git a/internal/store/ingress_test.go b/internal/store/ingress_test.go
index b8f3d9f23e..191881f371 100644
--- a/internal/store/ingress_test.go
+++ b/internal/store/ingress_test.go
@@ -19,9 +19,8 @@ package store
import (
"testing"
- "k8s.io/api/extensions/v1beta1"
+ networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
generator "k8s.io/kube-state-metrics/v2/pkg/metric_generator"
)
@@ -48,7 +47,7 @@ func TestIngressStore(t *testing.T) {
`
cases := []generateMetricsTestCase{
{
- Obj: &v1beta1.Ingress{
+ Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress1",
Namespace: "ns1",
@@ -63,7 +62,7 @@ func TestIngressStore(t *testing.T) {
MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
},
{
- Obj: &v1beta1.Ingress{
+ Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress2",
Namespace: "ns2",
@@ -80,7 +79,7 @@ func TestIngressStore(t *testing.T) {
MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
},
{
- Obj: &v1beta1.Ingress{
+ Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress3",
Namespace: "ns3",
@@ -97,7 +96,7 @@ func TestIngressStore(t *testing.T) {
MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
},
{
- Obj: &v1beta1.Ingress{
+ Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress4",
Namespace: "ns4",
@@ -105,18 +104,22 @@ func TestIngressStore(t *testing.T) {
Labels: map[string]string{"test-4": "test-4"},
ResourceVersion: "abcdef",
},
- Spec: v1beta1.IngressSpec{
- Rules: []v1beta1.IngressRule{
+ Spec: networkingv1.IngressSpec{
+ Rules: []networkingv1.IngressRule{
{
Host: "somehost",
- IngressRuleValue: v1beta1.IngressRuleValue{
- HTTP: &v1beta1.HTTPIngressRuleValue{
- Paths: []v1beta1.HTTPIngressPath{
+ IngressRuleValue: networkingv1.IngressRuleValue{
+ HTTP: &networkingv1.HTTPIngressRuleValue{
+ Paths: []networkingv1.HTTPIngressPath{
{
Path: "/somepath",
- Backend: v1beta1.IngressBackend{
- ServiceName: "someservice",
- ServicePort: intstr.FromInt(1234),
+ Backend: networkingv1.IngressBackend{
+ Service: &networkingv1.IngressServiceBackend{
+ Name: "someservice",
+ Port: networkingv1.ServiceBackendPort{
+ Number: 1234,
+ },
+ },
},
},
},
@@ -138,7 +141,7 @@ func TestIngressStore(t *testing.T) {
MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
},
{
- Obj: &v1beta1.Ingress{
+ Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress5",
Namespace: "ns5",
@@ -146,8 +149,8 @@ func TestIngressStore(t *testing.T) {
Labels: map[string]string{"test-5": "test-5"},
ResourceVersion: "abcdef",
},
- Spec: v1beta1.IngressSpec{
- TLS: []v1beta1.IngressTLS{
+ Spec: networkingv1.IngressSpec{
+ TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"somehost1", "somehost2"},
SecretName: "somesecret",
diff --git a/internal/store/job.go b/internal/store/job.go
index 03b463fae4..2807dc6beb 100644
--- a/internal/store/job.go
+++ b/internal/store/job.go
@@ -35,6 +35,7 @@ var (
descJobLabelsName = "kube_job_labels"
descJobLabelsHelp = "Kubernetes labels converted to Prometheus labels."
descJobLabelsDefaultLabels = []string{"namespace", "job_name"}
+ jobFailureReasons = []string{"BackoffLimitExceeded", "DeadlineExceeded", "Evicted"}
jobMetricFamilies = []generator.FamilyGenerator{
*generator.NewFamilyGenerator(
@@ -163,16 +164,48 @@ var (
),
*generator.NewFamilyGenerator(
"kube_job_status_failed",
- "The number of pods which reached Phase Failed.",
+ "The number of pods which reached Phase Failed and the reason for failure.",
metric.Gauge,
"",
wrapJobFunc(func(j *v1batch.Job) *metric.Family {
- return &metric.Family{
- Metrics: []*metric.Metric{
- {
- Value: float64(j.Status.Failed),
+ var ms []*metric.Metric
+
+ if float64(j.Status.Failed) == 0 {
+ return &metric.Family{
+ Metrics: []*metric.Metric{
+ {
+ Value: float64(j.Status.Failed),
+ },
},
- },
+ }
+ }
+
+ for _, condition := range j.Status.Conditions {
+ if condition.Type == v1batch.JobFailed {
+ reasonKnown := false
+ for _, reason := range jobFailureReasons {
+ reasonKnown = reasonKnown || failureReason(&condition, reason)
+
+ // for known reasons
+ ms = append(ms, &metric.Metric{
+ LabelKeys: []string{"reason"},
+ LabelValues: []string{reason},
+ Value: boolFloat64(failureReason(&condition, reason)),
+ })
+ }
+ // for unknown reasons
+ if !reasonKnown {
+ ms = append(ms, &metric.Metric{
+ LabelKeys: []string{"reason"},
+ LabelValues: []string{""},
+ Value: float64(j.Status.Failed),
+ })
+ }
+ }
+ }
+
+ return &metric.Family{
+ Metrics: ms,
}
}),
),
@@ -350,3 +383,10 @@ func createJobListWatch(kubeClient clientset.Interface, ns string) cache.ListerW
},
}
}
+
+func failureReason(jc *v1batch.JobCondition, reason string) bool {
+ if jc == nil {
+ return false
+ }
+ return jc.Reason == reason
+}
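The reworked `kube_job_status_failed` generator emits one series per known failure reason with a 0/1 value, using the package's existing `boolFloat64` helper (not shown in this patch), and falls back to a single series with an empty `reason` label carrying the failed-pod count when the `JobFailed` condition reports an unrecognized reason. A minimal sketch of the shape that helper presumably has:

```go
package store

// boolFloat64 already exists in internal/store; this sketch only illustrates
// the behavior the new generator relies on: true -> 1, false -> 0.
func boolFloat64(b bool) float64 {
	if b {
		return 1
	}
	return 0
}
```

The updated expected output in job_test.go below shows the resulting series for a job that failed with `BackoffLimitExceeded`.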
diff --git a/internal/store/job_test.go b/internal/store/job_test.go
index 6328df60dd..da2dca1c91 100644
--- a/internal/store/job_test.go
+++ b/internal/store/job_test.go
@@ -70,13 +70,13 @@ func TestJobStore(t *testing.T) {
# TYPE kube_job_status_active gauge
# HELP kube_job_status_completion_time CompletionTime represents time when the job was completed.
# TYPE kube_job_status_completion_time gauge
- # HELP kube_job_status_failed The number of pods which reached Phase Failed.
+ # HELP kube_job_status_failed The number of pods which reached Phase Failed and the reason for failure.
# TYPE kube_job_status_failed gauge
# HELP kube_job_status_start_time StartTime represents time when the job was acknowledged by the Job Manager.
# TYPE kube_job_status_start_time gauge
# HELP kube_job_status_succeeded The number of pods which reached Phase Succeeded.
- # TYPE kube_job_status_succeeded gauge
- `
+ # TYPE kube_job_status_succeeded gauge`
+
cases := []generateMetricsTestCase{
{
Obj: &v1batch.Job{
@@ -183,7 +183,7 @@ func TestJobStore(t *testing.T) {
CompletionTime: &metav1.Time{Time: FailedJob1CompletionTime},
StartTime: &metav1.Time{Time: FailedJob1StartTime},
Conditions: []v1batch.JobCondition{
- {Type: v1batch.JobFailed, Status: v1.ConditionTrue},
+ {Type: v1batch.JobFailed, Status: v1.ConditionTrue, Reason: "BackoffLimitExceeded"},
},
},
Spec: v1batch.JobSpec{
@@ -204,7 +204,9 @@ func TestJobStore(t *testing.T) {
kube_job_spec_parallelism{job_name="FailedJob1",namespace="ns1"} 1
kube_job_status_active{job_name="FailedJob1",namespace="ns1"} 0
kube_job_status_completion_time{job_name="FailedJob1",namespace="ns1"} 1.495810807e+09
- kube_job_status_failed{job_name="FailedJob1",namespace="ns1"} 1
+ kube_job_status_failed{job_name="FailedJob1",namespace="ns1",reason="BackoffLimitExceeded"} 1
+ kube_job_status_failed{job_name="FailedJob1",namespace="ns1",reason="DeadlineExceeded"} 0
+ kube_job_status_failed{job_name="FailedJob1",namespace="ns1",reason="Evicted"} 0
kube_job_status_start_time{job_name="FailedJob1",namespace="ns1"} 1.495807207e+09
kube_job_status_succeeded{job_name="FailedJob1",namespace="ns1"} 0
`,
diff --git a/internal/store/persistentvolume.go b/internal/store/persistentvolume.go
index 82dfa6b716..ed6a5ac207 100644
--- a/internal/store/persistentvolume.go
+++ b/internal/store/persistentvolume.go
@@ -32,11 +32,45 @@ import (
)
var (
+ descPersistentVolumeClaimRefName = "kube_persistentvolume_claim_ref"
+ descPersistentVolumeClaimRefHelp = "Information about the Persistent Volume Claim Reference."
+ descPersistentVolumeClaimRefDefaultLabels = []string{"persistentvolume"}
+
descPersistentVolumeLabelsName = "kube_persistentvolume_labels"
descPersistentVolumeLabelsHelp = "Kubernetes labels converted to Prometheus labels."
descPersistentVolumeLabelsDefaultLabels = []string{"persistentvolume"}
persistentVolumeMetricFamilies = []generator.FamilyGenerator{
+ *generator.NewFamilyGenerator(
+ descPersistentVolumeClaimRefName,
+ descPersistentVolumeClaimRefHelp,
+ metric.Gauge,
+ "",
+ wrapPersistentVolumeFunc(func(p *v1.PersistentVolume) *metric.Family {
+ claimRef := p.Spec.ClaimRef
+
+ if claimRef == nil {
+ return &metric.Family{
+ Metrics: []*metric.Metric{},
+ }
+ }
+ return &metric.Family{
+ Metrics: []*metric.Metric{
+ {
+ LabelKeys: []string{
+ "name",
+ "claim_namespace",
+ },
+ LabelValues: []string{
+ p.Spec.ClaimRef.Name,
+ p.Spec.ClaimRef.Namespace,
+ },
+ Value: 1,
+ },
+ },
+ }
+ }),
+ ),
*generator.NewFamilyGenerator(
descPersistentVolumeLabelsName,
descPersistentVolumeLabelsHelp,
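The new claim-ref family is registered through the package's existing `wrapPersistentVolumeFunc` helper, which this patch does not show. By analogy with `wrapCSRFunc` earlier in the diff, it presumably casts the cached object and prepends the default `persistentvolume` label to every metric; a sketch under that assumption:

```go
package store

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kube-state-metrics/v2/pkg/metric"
)

// Sketch by analogy with wrapCSRFunc shown earlier in this patch; the real
// wrapPersistentVolumeFunc already exists in this package and is not part of
// the change. It adapts a typed generator function to the generic interface{}
// signature and prepends the default "persistentvolume" label.
func wrapPersistentVolumeFunc(f func(*v1.PersistentVolume) *metric.Family) func(interface{}) *metric.Family {
	return func(obj interface{}) *metric.Family {
		pv := obj.(*v1.PersistentVolume)
		metricFamily := f(pv)

		for _, m := range metricFamily.Metrics {
			m.LabelKeys = append(descPersistentVolumeLabelsDefaultLabels, m.LabelKeys...)
			m.LabelValues = append([]string{pv.Name}, m.LabelValues...)
		}

		return metricFamily
	}
}
```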
diff --git a/internal/store/persistentvolume_test.go b/internal/store/persistentvolume_test.go
index f2d6b475c1..16b46f7838 100644
--- a/internal/store/persistentvolume_test.go
+++ b/internal/store/persistentvolume_test.go
@@ -399,6 +399,46 @@ func TestPersistentVolumeStore(t *testing.T) {
`,
MetricNames: []string{"kube_persistentvolume_labels"},
},
+ {
+ Obj: &v1.PersistentVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-claimed-pv",
+ },
+ Status: v1.PersistentVolumeStatus{
+ Phase: v1.VolumePending,
+ },
+ Spec: v1.PersistentVolumeSpec{
+ StorageClassName: "test",
+ ClaimRef: &v1.ObjectReference{
+ APIVersion: "v1",
+ Kind: "PersistentVolumeClaim",
+ Name: "pv-claim",
+ Namespace: "default",
+ },
+ },
+ },
+ Want: `
+ # HELP kube_persistentvolume_claim_ref Information about the Persistent Volume Claim Reference.
+ # TYPE kube_persistentvolume_claim_ref gauge
+ kube_persistentvolume_claim_ref{claim_namespace="default",name="pv-claim",persistentvolume="test-claimed-pv"} 1
+ `,
+ MetricNames: []string{"kube_persistentvolume_claim_ref"},
+ },
+ {
+ Obj: &v1.PersistentVolume{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-unclaimed-pv",
+ },
+ Status: v1.PersistentVolumeStatus{
+ Phase: v1.VolumeAvailable,
+ },
+ },
+ Want: `
+ # HELP kube_persistentvolume_claim_ref Information about the Persistent Volume Claim Reference.
+ # TYPE kube_persistentvolume_claim_ref gauge
+ `,
+ MetricNames: []string{"kube_persistentvolume_claim_ref"},
+ },
{
Obj: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
diff --git a/internal/store/pod.go b/internal/store/pod.go
index 0a0a744d12..4dd352a078 100644
--- a/internal/store/pod.go
+++ b/internal/store/pod.go
@@ -824,114 +824,6 @@ var (
}
}),
),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_requests_cpu_cores",
- "The number of CPU cores requested by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Requests
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceCPU {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.MilliValue()) / 1000,
- })
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_requests_memory_bytes",
- "Bytes of memory requested by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Requests
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceMemory {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.Value()),
- })
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_requests_storage_bytes",
- "Bytes of storage requested by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Requests
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceStorage {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.Value()),
- })
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_requests_ephemeral_storage_bytes",
- "Bytes of ephemeral-storage requested by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Requests
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceEphemeralStorage {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.Value()),
- })
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
*generator.NewFamilyGenerator(
"kube_pod_container_resource_requests",
"The number of requested request resource by a container.",
@@ -944,137 +836,46 @@ var (
req := c.Resources.Requests
for resourceName, val := range req {
- if isHugePageResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isAttachableVolumeResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
- Value: float64(val.Value()),
- })
- }
- if isExtendedResourceName(resourceName) {
- ms = append(ms, &metric.Metric{
- LabelValues: []string{c.Name, sanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
- Value: float64(val.Value()),
- })
- }
- }
- }
-
- for _, metric := range ms {
- metric.LabelKeys = []string{"container", "resource", "unit"}
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_limits_cpu_cores",
- "The number of CPU cores requested limit by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Limits
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceCPU {
+ switch resourceName {
+ case v1.ResourceCPU:
ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
+ LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitCore)},
Value: float64(val.MilliValue()) / 1000,
})
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_limits_memory_bytes",
- "Bytes of memory requested limit by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Limits
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceMemory {
+ case v1.ResourceStorage:
+ fallthrough
+ case v1.ResourceEphemeralStorage:
+ fallthrough
+ case v1.ResourceMemory:
ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
+ LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
Value: float64(val.Value()),
})
+ default:
+ if isHugePageResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
+ Value: float64(val.Value()),
+ })
+ }
+ if isAttachableVolumeResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitByte)},
+ Value: float64(val.Value()),
+ })
+ }
+ if isExtendedResourceName(resourceName) {
+ ms = append(ms, &metric.Metric{
+ LabelValues: []string{c.Name, p.Spec.NodeName, sanitizeLabelName(string(resourceName)), string(constant.UnitInteger)},
+ Value: float64(val.Value()),
+ })
+ }
}
}
}
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_limits_storage_bytes",
- "Bytes of storage requested limit by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Limits
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceStorage {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.Value()),
- })
- }
- }
- }
-
- return &metric.Family{
- Metrics: ms,
- }
- }),
- ),
- *generator.NewFamilyGenerator(
- "kube_pod_container_resource_limits_ephemeral_storage_bytes",
- "Bytes of ephemeral-storage requested limit by a container.",
- metric.Gauge,
- "",
- wrapPodFunc(func(p *v1.Pod) *metric.Family {
- ms := []*metric.Metric{}
-
- for _, c := range p.Spec.Containers {
- req := c.Resources.Limits
-
- for resourceName, val := range req {
- if resourceName == v1.ResourceEphemeralStorage {
- ms = append(ms, &metric.Metric{
- LabelKeys: []string{"container"},
- LabelValues: []string{c.Name},
- Value: float64(val.Value()),
- })
- }
- }
+ for _, metric := range ms {
+ metric.LabelKeys = []string{"container", "node", "resource", "unit"}
}
return &metric.Family{
@@ -1511,6 +1312,27 @@ var (
}
}
+ return &metric.Family{
+ Metrics: ms,
+ }
+ }),
+ ),
+ *generator.NewFamilyGenerator(
+ "kube_pod_runtimeclass_name_info",
+ "The runtimeclass associated with the pod.",
+ metric.Gauge,
+ "",
+ wrapPodFunc(func(p *v1.Pod) *metric.Family {
+ ms := []*metric.Metric{}
+
+ if p.Spec.RuntimeClassName != nil {
+ ms = append(ms, &metric.Metric{
+ LabelKeys: []string{"runtimeclass_name"},
+ LabelValues: []string{*p.Spec.RuntimeClassName},
+ Value: 1,
+ })
+ }
+
return &metric.Family{
Metrics: ms,
}
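
Note: the hunks above collapse the per-resource request/limit families into the single kube_pod_container_resource_limits and kube_pod_container_resource_requests families, keyed by "resource" and "unit" labels plus the pod's "node". The short, self-contained Go sketch below illustrates that label mapping under simplifying assumptions; the labelsFor helper and the regexp-based sanitizer are illustrative only and are not code from this repository.

    package main

    import (
    	"fmt"
    	"regexp"

    	v1 "k8s.io/api/core/v1"
    )

    var invalidLabelChars = regexp.MustCompile(`[^a-zA-Z0-9_]`)

    // labelsFor sketches how the consolidated families derive their
    // "resource" and "unit" label values from a Kubernetes resource name.
    // Hugepage, attachable-volume and extended resources are folded into
    // the default branch here for brevity; the real generator distinguishes
    // them (byte for hugepages/volumes, integer for extended resources).
    func labelsFor(name v1.ResourceName) (resource, unit string) {
    	resource = invalidLabelChars.ReplaceAllString(string(name), "_")
    	switch name {
    	case v1.ResourceCPU:
    		unit = "core"
    	case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
    		unit = "byte"
    	default:
    		unit = "integer" // e.g. nvidia.com/gpu -> resource "nvidia_com_gpu"
    	}
    	return resource, unit
    }

    func main() {
    	for _, n := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, "nvidia.com/gpu"} {
    		r, u := labelsFor(n)
    		fmt.Printf("resource=%q unit=%q\n", r, u)
    	}
    }
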
diff --git a/internal/store/pod_test.go b/internal/store/pod_test.go
index 1b7d2183f6..4d66286bab 100644
--- a/internal/store/pod_test.go
+++ b/internal/store/pod_test.go
@@ -29,6 +29,7 @@ import (
func TestPodStore(t *testing.T) {
var test = true
+ runtimeclass := "foo"
startTime := 1501569018
metav1StartTime := metav1.Unix(int64(startTime), 0)
@@ -1376,78 +1377,53 @@ kube_pod_container_status_last_terminated_reason{container="container7",namespac
},
},
Want: `
- # HELP kube_pod_container_resource_limits The number of requested limit resource by a container.
- # HELP kube_pod_container_resource_limits_cpu_cores The number of CPU cores requested limit by a container.
- # HELP kube_pod_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by a container.
- # HELP kube_pod_container_resource_limits_memory_bytes Bytes of memory requested limit by a container.
- # HELP kube_pod_container_resource_limits_storage_bytes Bytes of storage requested limit by a container.
- # HELP kube_pod_container_resource_requests The number of requested request resource by a container.
- # HELP kube_pod_container_resource_requests_cpu_cores The number of CPU cores requested by a container.
- # HELP kube_pod_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by a container.
- # HELP kube_pod_container_resource_requests_memory_bytes Bytes of memory requested by a container.
- # HELP kube_pod_container_resource_requests_storage_bytes Bytes of storage requested by a container.
- # HELP kube_pod_init_container_resource_limits The number of requested limit resource by an init container.
- # HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
- # HELP kube_pod_init_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by an init container.
- # HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
- # HELP kube_pod_init_container_resource_limits_storage_bytes Bytes of storage requested limit by an init container.
- # HELP kube_pod_init_container_resource_requests The number of requested request resource by an init container.
- # HELP kube_pod_init_container_resource_requests_cpu_cores The number of CPU cores requested by an init container.
- # HELP kube_pod_init_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by an init container.
- # HELP kube_pod_init_container_resource_requests_memory_bytes Bytes of memory requested by an init container.
- # HELP kube_pod_init_container_resource_requests_storage_bytes Bytes of storage requested by an init container.
- # HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
- # TYPE kube_pod_container_resource_limits gauge
- # TYPE kube_pod_container_resource_limits_cpu_cores gauge
- # TYPE kube_pod_container_resource_limits_ephemeral_storage_bytes gauge
- # TYPE kube_pod_container_resource_limits_memory_bytes gauge
- # TYPE kube_pod_container_resource_limits_storage_bytes gauge
- # TYPE kube_pod_container_resource_requests gauge
- # TYPE kube_pod_container_resource_requests_cpu_cores gauge
- # TYPE kube_pod_container_resource_requests_ephemeral_storage_bytes gauge
- # TYPE kube_pod_container_resource_requests_memory_bytes gauge
- # TYPE kube_pod_container_resource_requests_storage_bytes gauge
- # TYPE kube_pod_init_container_resource_limits gauge
- # TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
- # TYPE kube_pod_init_container_resource_limits_ephemeral_storage_bytes gauge
- # TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
- # TYPE kube_pod_init_container_resource_limits_storage_bytes gauge
- # TYPE kube_pod_init_container_resource_requests gauge
- # TYPE kube_pod_init_container_resource_requests_cpu_cores gauge
- # TYPE kube_pod_init_container_resource_requests_ephemeral_storage_bytes gauge
- # TYPE kube_pod_init_container_resource_requests_memory_bytes gauge
- # TYPE kube_pod_init_container_resource_requests_storage_bytes gauge
- # TYPE kube_pod_init_container_status_last_terminated_reason gauge
- kube_pod_container_resource_limits_cpu_cores{container="pod1_con1",namespace="ns1",pod="pod1"} 0.2
- kube_pod_container_resource_limits_cpu_cores{container="pod1_con2",namespace="ns1",pod="pod1"} 0.3
- kube_pod_container_resource_limits_ephemeral_storage_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 3e+08
- kube_pod_container_resource_limits_memory_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 1e+08
- kube_pod_container_resource_limits_memory_bytes{container="pod1_con2",namespace="ns1",pod="pod1"} 2e+08
- kube_pod_container_resource_limits_storage_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 4e+08
- kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
- kube_pod_container_resource_requests_cpu_cores{container="pod1_con1",namespace="ns1",pod="pod1"} 0.2
- kube_pod_container_resource_requests_cpu_cores{container="pod1_con2",namespace="ns1",pod="pod1"} 0.3
- kube_pod_container_resource_requests_ephemeral_storage_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 3e+08
- kube_pod_container_resource_requests_memory_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 1e+08
- kube_pod_container_resource_requests_memory_bytes{container="pod1_con2",namespace="ns1",pod="pod1"} 2e+08
- kube_pod_container_resource_requests_storage_bytes{container="pod1_con1",namespace="ns1",pod="pod1"} 4e+08
- kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
- kube_pod_init_container_resource_limits_cpu_cores{container="pod1_initcon1",namespace="ns1",pod="pod1"} 0.2
- kube_pod_init_container_resource_limits_ephemeral_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 3e+08
- kube_pod_init_container_resource_limits_memory_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 1e+08
- kube_pod_init_container_resource_limits_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 4e+08
- kube_pod_init_container_resource_limits{container="pod1_initcon1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
- kube_pod_init_container_resource_requests_cpu_cores{container="pod1_initcon1",namespace="ns1",pod="pod1"} 0.2
- kube_pod_init_container_resource_requests_ephemeral_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 3e+08
- kube_pod_init_container_resource_requests_memory_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 1e+08
- kube_pod_init_container_resource_requests_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 4e+08
- kube_pod_init_container_resource_requests{container="pod1_initcon1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
+ # HELP kube_pod_container_resource_limits The number of requested limit resource by a container.
+ # HELP kube_pod_container_resource_requests The number of requested request resource by a container.
+ # HELP kube_pod_init_container_resource_limits The number of requested limit resource by an init container.
+ # HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
+ # HELP kube_pod_init_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by an init container.
+ # HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
+ # HELP kube_pod_init_container_resource_limits_storage_bytes Bytes of storage requested limit by an init container.
+ # HELP kube_pod_init_container_resource_requests The number of requested request resource by an init container.
+ # HELP kube_pod_init_container_resource_requests_cpu_cores The number of CPU cores requested by an init container.
+ # HELP kube_pod_init_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by an init container.
+ # HELP kube_pod_init_container_resource_requests_memory_bytes Bytes of memory requested by an init container.
+ # HELP kube_pod_init_container_resource_requests_storage_bytes Bytes of storage requested by an init container.
+ # HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
+ # TYPE kube_pod_container_resource_limits gauge
+ # TYPE kube_pod_container_resource_requests gauge
+ # TYPE kube_pod_init_container_resource_limits gauge
+ # TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
+ # TYPE kube_pod_init_container_resource_limits_ephemeral_storage_bytes gauge
+ # TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
+ # TYPE kube_pod_init_container_resource_limits_storage_bytes gauge
+ # TYPE kube_pod_init_container_resource_requests gauge
+ # TYPE kube_pod_init_container_resource_requests_cpu_cores gauge
+ # TYPE kube_pod_init_container_resource_requests_ephemeral_storage_bytes gauge
+ # TYPE kube_pod_init_container_resource_requests_memory_bytes gauge
+ # TYPE kube_pod_init_container_resource_requests_storage_bytes gauge
+ # TYPE kube_pod_init_container_status_last_terminated_reason gauge
+ kube_pod_container_resource_limits{container="pod1_con1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
+ kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="",pod="pod1",resource="cpu",unit="core"} 0.2
+ kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="",pod="pod1",resource="ephemeral_storage",unit="byte"} 3e+08
+ kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="",pod="pod1",resource="memory",unit="byte"} 1e+08
+ kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
+ kube_pod_container_resource_requests{container="pod1_con1",namespace="ns1",node="",pod="pod1",resource="storage",unit="byte"} 4e+08
+ kube_pod_container_resource_requests{container="pod1_con2",namespace="ns1",node="",pod="pod1",resource="cpu",unit="core"} 0.3
+ kube_pod_container_resource_requests{container="pod1_con2",namespace="ns1",node="",pod="pod1",resource="memory",unit="byte"} 2e+08
+ kube_pod_init_container_resource_limits_cpu_cores{container="pod1_initcon1",namespace="ns1",pod="pod1"} 0.2
+ kube_pod_init_container_resource_limits_ephemeral_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 3e+08
+ kube_pod_init_container_resource_limits_memory_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 1e+08
+ kube_pod_init_container_resource_limits_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 4e+08
+ kube_pod_init_container_resource_limits{container="pod1_initcon1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
+ kube_pod_init_container_resource_requests_cpu_cores{container="pod1_initcon1",namespace="ns1",pod="pod1"} 0.2
+ kube_pod_init_container_resource_requests_ephemeral_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 3e+08
+ kube_pod_init_container_resource_requests_memory_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 1e+08
+ kube_pod_init_container_resource_requests_storage_bytes{container="pod1_initcon1",namespace="ns1",pod="pod1"} 4e+08
+ kube_pod_init_container_resource_requests{container="pod1_initcon1",namespace="ns1",pod="pod1",resource="nvidia_com_gpu",unit="integer"} 1
`,
MetricNames: []string{
"kube_pod_container_resource_requests",
- "kube_pod_container_resource_requests_memory_bytes",
- "kube_pod_container_resource_requests_storage_bytes",
- "kube_pod_container_resource_requests_ephemeral_storage_bytes",
"kube_pod_container_resource_limits",
"kube_pod_init_container_resource_limits",
"kube_pod_init_container_resource_requests",
@@ -1512,34 +1488,14 @@ kube_pod_container_status_last_terminated_reason{container="container7",namespac
},
},
Want: `
- # HELP kube_pod_container_resource_limits_cpu_cores The number of CPU cores requested limit by a container.
- # HELP kube_pod_container_resource_limits_memory_bytes Bytes of memory requested limit by a container.
- # HELP kube_pod_container_resource_requests_cpu_cores The number of CPU cores requested by a container.
- # HELP kube_pod_container_resource_requests_memory_bytes Bytes of memory requested by a container.
- # HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
- # HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
- # TYPE kube_pod_container_resource_limits_cpu_cores gauge
- # TYPE kube_pod_container_resource_limits_memory_bytes gauge
- # TYPE kube_pod_container_resource_requests_cpu_cores gauge
- # TYPE kube_pod_container_resource_requests_memory_bytes gauge
- # TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
- # TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
- kube_pod_container_resource_requests_cpu_cores{container="pod2_con1",namespace="ns2",pod="pod2"} 0.4
- kube_pod_container_resource_requests_cpu_cores{container="pod2_con2",namespace="ns2",pod="pod2"} 0.5
- kube_pod_container_resource_requests_memory_bytes{container="pod2_con1",namespace="ns2",pod="pod2"} 3e+08
- kube_pod_container_resource_requests_memory_bytes{container="pod2_con2",namespace="ns2",pod="pod2"} 4e+08
- kube_pod_container_resource_limits_cpu_cores{container="pod2_con1",namespace="ns2",pod="pod2"} 0.4
- kube_pod_container_resource_limits_cpu_cores{container="pod2_con2",namespace="ns2",pod="pod2"} 0.5
- kube_pod_container_resource_limits_memory_bytes{container="pod2_con1",namespace="ns2",pod="pod2"} 3e+08
- kube_pod_container_resource_limits_memory_bytes{container="pod2_con2",namespace="ns2",pod="pod2"} 4e+08
- kube_pod_init_container_resource_limits_cpu_cores{container="pod2_initcon1",namespace="ns2",pod="pod2"} 0.4
- kube_pod_init_container_resource_limits_memory_bytes{container="pod2_initcon1",namespace="ns2",pod="pod2"} 3e+08
+ # HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
+ # HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
+ # TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
+ # TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
+ kube_pod_init_container_resource_limits_cpu_cores{container="pod2_initcon1",namespace="ns2",pod="pod2"} 0.4
+ kube_pod_init_container_resource_limits_memory_bytes{container="pod2_initcon1",namespace="ns2",pod="pod2"} 3e+08
`,
MetricNames: []string{
- "kube_pod_container_resource_requests_cpu_cores",
- "kube_pod_container_resource_requests_memory_bytes",
- "kube_pod_container_resource_limits_cpu_cores",
- "kube_pod_container_resource_limits_memory_bytes",
"kube_pod_init_container_resource_limits_cpu_cores",
"kube_pod_init_container_resource_limits_memory_bytes",
},
@@ -1620,6 +1576,28 @@ kube_pod_container_status_last_terminated_reason{container="container7",namespac
"kube_pod_spec_volumes_persistentvolumeclaims_readonly",
},
},
+ {
+ Obj: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod1",
+ Namespace: "ns1",
+ Labels: map[string]string{
+ "app": "example",
+ },
+ },
+ Spec: v1.PodSpec{
+ RuntimeClassName: &runtimeclass,
+ },
+ },
+ Want: `
+ # HELP kube_pod_runtimeclass_name_info The runtimeclass associated with the pod.
+ # TYPE kube_pod_runtimeclass_name_info gauge
+ kube_pod_runtimeclass_name_info{namespace="ns1",pod="pod1",runtimeclass_name="foo"} 1
+ `,
+ MetricNames: []string{
+ "kube_pod_runtimeclass_name_info",
+ },
+ },
}
for i, c := range cases {
@@ -1695,7 +1673,7 @@ func BenchmarkPodStore(b *testing.B) {
},
}
- expectedFamilies := 57
+ expectedFamilies := 50
for n := 0; n < b.N; n++ {
families := f(pod)
if len(families) != expectedFamilies {
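
Note: the benchmark's expected family count drops from 57 to 50 because the eight per-resource container request/limit families are removed and one new family, kube_pod_runtimeclass_name_info, is added (57 - 8 + 1 = 50). That family only emits a sample when Spec.RuntimeClassName is non-nil, as the hypothetical snippet below sketches; the printed sample format is illustrative and is not the store's serialization code.

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	rc := "foo"
    	pod := v1.Pod{
    		ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1"},
    		Spec:       v1.PodSpec{RuntimeClassName: &rc},
    	}
    	// A pod with a nil RuntimeClassName produces no sample at all.
    	if pod.Spec.RuntimeClassName != nil {
    		fmt.Printf("kube_pod_runtimeclass_name_info{namespace=%q,pod=%q,runtimeclass_name=%q} 1\n",
    			pod.Namespace, pod.Name, *pod.Spec.RuntimeClassName)
    	}
    }
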
diff --git a/jsonnet/kube-state-metrics/jsonnetfile.json b/jsonnet/kube-state-metrics/jsonnetfile.json
index d31ac11001..677d98118f 100644
--- a/jsonnet/kube-state-metrics/jsonnetfile.json
+++ b/jsonnet/kube-state-metrics/jsonnetfile.json
@@ -1,14 +1,3 @@
{
- "dependencies": [
- {
- "name": "ksonnet",
- "source": {
- "git": {
- "remote": "https://github.com/ksonnet/ksonnet-lib",
- "subdir": ""
- }
- },
- "version": "master"
- }
- ]
+ "dependencies": []
}
diff --git a/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet b/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet
index ecd28e468f..938040371e 100644
--- a/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet
+++ b/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet
@@ -1,5 +1,3 @@
-local k = import 'ksonnet/ksonnet.beta.4/k.libsonnet';
-
{
local ksm = self,
name:: error 'must set namespace',
@@ -19,247 +17,301 @@ local k = import 'ksonnet/ksonnet.beta.4/k.libsonnet';
},
clusterRoleBinding:
- local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
-
- clusterRoleBinding.new() +
- clusterRoleBinding.mixin.metadata.withName(ksm.name) +
- clusterRoleBinding.mixin.metadata.withLabels(ksm.commonLabels) +
- clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
- clusterRoleBinding.mixin.roleRef.withName(ksm.name) +
- clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
- clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: ksm.name, namespace: ksm.namespace }]),
+ {
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'ClusterRoleBinding',
+ metadata: {
+ name: ksm.name,
+ labels: ksm.commonLabels,
+ },
+ roleRef: {
+ apiGroup: 'rbac.authorization.k8s.io',
+ kind: 'ClusterRole',
+ name: ksm.name,
+ },
+ subjects: [{
+ kind: 'ServiceAccount',
+ name: ksm.name,
+ namespace: ksm.namespace,
+ }],
+ },
clusterRole:
- local clusterRole = k.rbac.v1.clusterRole;
- local rulesType = clusterRole.rulesType;
-
local rules = [
- rulesType.new() +
- rulesType.withApiGroups(['']) +
- rulesType.withResources([
- 'configmaps',
- 'secrets',
- 'nodes',
- 'pods',
- 'services',
- 'resourcequotas',
- 'replicationcontrollers',
- 'limitranges',
- 'persistentvolumeclaims',
- 'persistentvolumes',
- 'namespaces',
- 'endpoints',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['extensions']) +
- rulesType.withResources([
- 'daemonsets',
- 'deployments',
- 'replicasets',
- 'ingresses',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['apps']) +
- rulesType.withResources([
- 'statefulsets',
- 'daemonsets',
- 'deployments',
- 'replicasets',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['batch']) +
- rulesType.withResources([
- 'cronjobs',
- 'jobs',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['autoscaling']) +
- rulesType.withResources([
- 'horizontalpodautoscalers',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['authentication.k8s.io']) +
- rulesType.withResources([
- 'tokenreviews',
- ]) +
- rulesType.withVerbs(['create']),
-
- rulesType.new() +
- rulesType.withApiGroups(['authorization.k8s.io']) +
- rulesType.withResources([
- 'subjectaccessreviews',
- ]) +
- rulesType.withVerbs(['create']),
-
- rulesType.new() +
- rulesType.withApiGroups(['policy']) +
- rulesType.withResources([
- 'poddisruptionbudgets',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['certificates.k8s.io']) +
- rulesType.withResources([
- 'certificatesigningrequests',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['storage.k8s.io']) +
- rulesType.withResources([
- 'storageclasses',
- 'volumeattachments',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['admissionregistration.k8s.io']) +
- rulesType.withResources([
- 'mutatingwebhookconfigurations',
- 'validatingwebhookconfigurations',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['networking.k8s.io']) +
- rulesType.withResources([
- 'networkpolicies',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
-
- rulesType.new() +
- rulesType.withApiGroups(['coordination.k8s.io']) +
- rulesType.withResources([
- 'leases',
- ]) +
- rulesType.withVerbs(['list', 'watch']),
+ {
+ apiGroups: [''],
+ resources: [
+ 'configmaps',
+ 'secrets',
+ 'nodes',
+ 'pods',
+ 'services',
+ 'resourcequotas',
+ 'replicationcontrollers',
+ 'limitranges',
+ 'persistentvolumeclaims',
+ 'persistentvolumes',
+ 'namespaces',
+ 'endpoints',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['extensions'],
+ resources: [
+ 'daemonsets',
+ 'deployments',
+ 'replicasets',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['apps'],
+ resources: [
+ 'statefulsets',
+ 'daemonsets',
+ 'deployments',
+ 'replicasets',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['batch'],
+ resources: [
+ 'cronjobs',
+ 'jobs',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['autoscaling'],
+ resources: [
+ 'horizontalpodautoscalers',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['authentication.k8s.io'],
+ resources: [
+ 'tokenreviews',
+ ],
+ verbs: ['create'],
+ },
+ {
+ apiGroups: ['authorization.k8s.io'],
+ resources: [
+ 'subjectaccessreviews',
+ ],
+ verbs: ['create'],
+ },
+ {
+ apiGroups: ['policy'],
+ resources: [
+ 'poddisruptionbudgets',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['certificates.k8s.io'],
+ resources: [
+ 'certificatesigningrequests',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['storage.k8s.io'],
+ resources: [
+ 'storageclasses',
+ 'volumeattachments',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['admissionregistration.k8s.io'],
+ resources: [
+ 'mutatingwebhookconfigurations',
+ 'validatingwebhookconfigurations',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['networking.k8s.io'],
+ resources: [
+ 'networkpolicies',
+ 'ingresses',
+ ],
+ verbs: ['list', 'watch'],
+ },
+ {
+ apiGroups: ['coordination.k8s.io'],
+ resources: [
+ 'leases',
+ ],
+ verbs: ['list', 'watch'],
+ },
];
- clusterRole.new() +
- clusterRole.mixin.metadata.withName(ksm.name) +
- clusterRole.mixin.metadata.withLabels(ksm.commonLabels) +
- clusterRole.withRules(rules),
+ {
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'ClusterRole',
+ metadata: {
+ name: ksm.name,
+ labels: ksm.commonLabels,
+ },
+ rules: rules,
+ },
deployment:
- local deployment = k.apps.v1.deployment;
- local container = deployment.mixin.spec.template.spec.containersType;
- local volume = deployment.mixin.spec.template.spec.volumesType;
- local containerPort = container.portsType;
- local containerVolumeMount = container.volumeMountsType;
- local podSelector = deployment.mixin.spec.template.spec.selectorType;
-
- local c =
- container.new('kube-state-metrics', ksm.image) +
- container.withPorts([
- containerPort.newNamed(8080, 'http-metrics'),
- containerPort.newNamed(8081, 'telemetry'),
- ]) +
- container.mixin.livenessProbe.httpGet.withPath('/healthz') +
- container.mixin.livenessProbe.httpGet.withPort(8080) +
- container.mixin.livenessProbe.withInitialDelaySeconds(5) +
- container.mixin.livenessProbe.withTimeoutSeconds(5) +
- container.mixin.readinessProbe.httpGet.withPath('/') +
- container.mixin.readinessProbe.httpGet.withPort(8081) +
- container.mixin.readinessProbe.withInitialDelaySeconds(5) +
- container.mixin.readinessProbe.withTimeoutSeconds(5) +
- container.mixin.securityContext.withRunAsUser(65534);
-
- deployment.new(ksm.name, 1, c, ksm.commonLabels) +
- deployment.mixin.metadata.withNamespace(ksm.namespace) +
- deployment.mixin.metadata.withLabels(ksm.commonLabels) +
- deployment.mixin.spec.selector.withMatchLabels(ksm.podLabels) +
- deployment.mixin.spec.template.spec.withNodeSelector({ 'kubernetes.io/os': 'linux' }) +
- deployment.mixin.spec.template.spec.withServiceAccountName(ksm.name),
+ local c = {
+ name: 'kube-state-metrics',
+ image: ksm.image,
+ ports: [
+ { name: 'http-metrics', containerPort: 8080 },
+ { name: 'telemetry', containerPort: 8081 },
+ ],
+ securityContext: { runAsUser: 65534 },
+ livenessProbe: { timeoutSeconds: 5, initialDelaySeconds: 5, httpGet: {
+ port: 8080,
+ path: '/healthz',
+ } },
+ readinessProbe: { timeoutSeconds: 5, initialDelaySeconds: 5, httpGet: {
+ port: 8081,
+ path: '/',
+ } },
+ };
+
+ {
+ apiVersion: 'apps/v1',
+ kind: 'Deployment',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ spec: {
+ replicas: 1,
+ selector: { matchLabels: ksm.podLabels },
+ template: {
+ metadata: {
+ labels: ksm.commonLabels,
+ },
+ spec: {
+ containers: [c],
+ serviceAccountName: ksm.serviceAccount.metadata.name,
+ nodeSelector: { 'kubernetes.io/os': 'linux' },
+ },
+ },
+ },
+ },
serviceAccount:
- local serviceAccount = k.core.v1.serviceAccount;
-
- serviceAccount.new(ksm.name) +
- serviceAccount.mixin.metadata.withNamespace(ksm.namespace) +
- serviceAccount.mixin.metadata.withLabels(ksm.commonLabels),
+ {
+ apiVersion: 'v1',
+ kind: 'ServiceAccount',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ },
service:
- local service = k.core.v1.service;
- local servicePort = service.mixin.spec.portsType;
-
- local ksmServicePortMain = servicePort.newNamed('http-metrics', 8080, 'http-metrics');
- local ksmServicePortSelf = servicePort.newNamed('telemetry', 8081, 'telemetry');
-
- service.new(ksm.name, ksm.podLabels, [ksmServicePortMain, ksmServicePortSelf]) +
- service.mixin.metadata.withNamespace(ksm.namespace) +
- service.mixin.metadata.withLabels(ksm.commonLabels) +
- service.mixin.spec.withClusterIp('None'),
+ {
+ apiVersion: 'v1',
+ kind: 'Service',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ spec: {
+ clusterIP: 'None',
+ selector: ksm.podLabels,
+ ports: [
+ { name: 'http-metrics', port: 8080, targetPort: 'http-metrics' },
+ { name: 'telemetry', port: 8081, targetPort: 'telemetry' },
+ ],
+ },
+ },
autosharding:: {
role:
- local role = k.rbac.v1.role;
- local rulesType = role.rulesType;
-
- local rules = [
- rulesType.new() +
- rulesType.withApiGroups(['']) +
- rulesType.withResources(['pods']) +
- rulesType.withVerbs(['get']),
- rulesType.new() +
- rulesType.withApiGroups(['apps']) +
- rulesType.withResources(['statefulsets']) +
- rulesType.withResourceNames([ksm.name]) +
- rulesType.withVerbs(['get']),
- ];
-
- role.new() +
- role.mixin.metadata.withName(ksm.name) +
- role.mixin.metadata.withNamespace(ksm.namespace) +
- role.mixin.metadata.withLabels(ksm.commonLabels) +
- role.withRules(rules),
+ {
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'Role',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ rules: [{
+ apiGroups: [''],
+ resources: ['pods'],
+ verbs: ['get'],
+ }, {
+ apiGroups: ['apps'],
+ resourceNames: ['kube-state-metrics'],
+ resources: ['statefulsets'],
+ verbs: ['get'],
+ }],
+ },
roleBinding:
- local roleBinding = k.rbac.v1.roleBinding;
-
- roleBinding.new() +
- roleBinding.mixin.metadata.withName(ksm.name) +
- roleBinding.mixin.metadata.withLabels(ksm.commonLabels) +
- roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
- roleBinding.mixin.roleRef.withName(ksm.name) +
- roleBinding.mixin.roleRef.withNamespace(ksm.namespace) +
- roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
- roleBinding.withSubjects([{ kind: 'ServiceAccount', name: ksm.name }]),
+ {
+ apiVersion: 'rbac.authorization.k8s.io/v1',
+ kind: 'RoleBinding',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ roleRef: {
+ apiGroup: 'rbac.authorization.k8s.io',
+ kind: 'Role',
+ name: 'kube-state-metrics',
+ },
+ subjects: [{
+ kind: 'ServiceAccount',
+ name: ksm.serviceAccount.metadata.name,
+ }],
+ },
statefulset:
- local statefulset = k.apps.v1.statefulSet;
- local container = statefulset.mixin.spec.template.spec.containersType;
- local containerEnv = container.envType;
-
- local c = ksm.deployment.spec.template.spec.containers[0] +
- container.withArgs([
- '--pod=$(POD_NAME)',
- '--pod-namespace=$(POD_NAMESPACE)',
- ]) +
- container.mixin.securityContext.withRunAsUser(65534) +
- container.withEnv([
- containerEnv.new('POD_NAME') +
- containerEnv.mixin.valueFrom.fieldRef.withFieldPath('metadata.name'),
- containerEnv.new('POD_NAMESPACE') +
- containerEnv.mixin.valueFrom.fieldRef.withFieldPath('metadata.namespace'),
- ]);
-
- statefulset.new(ksm.name, 2, c, [], ksm.commonLabels) +
- statefulset.mixin.metadata.withNamespace(ksm.namespace) +
- statefulset.mixin.metadata.withLabels(ksm.commonLabels) +
- statefulset.mixin.spec.withServiceName(ksm.service.metadata.name) +
- statefulset.mixin.spec.selector.withMatchLabels(ksm.podLabels) +
- statefulset.mixin.spec.template.spec.withNodeSelector({ 'kubernetes.io/os': 'linux' }) +
- statefulset.mixin.spec.template.spec.withServiceAccountName(ksm.name),
+      // Extend the default container defined in the deployment above.
+ local c = ksm.deployment.spec.template.spec.containers[0] {
+ args: [
+ '--pod=$(POD_NAME)',
+ '--pod-namespace=$(POD_NAMESPACE)',
+ ],
+ env: [
+ { name: 'POD_NAME', valueFrom: { fieldRef: { fieldPath: 'metadata.name' } } },
+ { name: 'POD_NAMESPACE', valueFrom: { fieldRef: { fieldPath: 'metadata.namespace' } } },
+ ],
+ };
+
+ {
+ apiVersion: 'apps/v1',
+ kind: 'StatefulSet',
+ metadata: {
+ name: ksm.name,
+ namespace: ksm.namespace,
+ labels: ksm.commonLabels,
+ },
+ spec: {
+ replicas: 2,
+ selector: { matchLabels: ksm.podLabels },
+ serviceName: ksm.service.metadata.name,
+ template: {
+ metadata: {
+ labels: ksm.commonLabels,
+ },
+ spec: {
+ containers: [c],
+ serviceAccountName: ksm.serviceAccount.metadata.name,
+ nodeSelector: { 'kubernetes.io/os': 'linux' },
+ },
+ },
+ },
+ },
} + {
service: ksm.service,
serviceAccount: ksm.serviceAccount,
diff --git a/main_test.go b/main_test.go
index 719bd7ad0d..17739a7e1a 100644
--- a/main_test.go
+++ b/main_test.go
@@ -19,6 +19,7 @@ package main
import (
"bytes"
"context"
+ "fmt"
"io/ioutil"
"net/http/httptest"
"sort"
@@ -163,197 +164,177 @@ func TestFullScrapeCycle(t *testing.T) {
body, _ := ioutil.ReadAll(resp.Body)
- expected := `# HELP kube_pod_info Information about pod.
-# TYPE kube_pod_info gauge
-kube_pod_info{namespace="default",pod="pod0",host_ip="1.1.1.1",pod_ip="1.2.3.4",uid="abc-0",node="node1",created_by_kind="",created_by_name="",priority_class="",host_network="false"} 1
-# HELP kube_pod_start_time Start time in unix timestamp for a pod.
-# TYPE kube_pod_start_time gauge
+ expected := `# HELP kube_pod_completion_time Completion time in unix timestamp for a pod.
+# HELP kube_pod_container_info Information about a container in a pod.
+# HELP kube_pod_container_resource_limits The number of requested limit resource by a container.
+# HELP kube_pod_container_resource_requests The number of requested request resource by a container.
# HELP kube_pod_container_state_started Start time in unix timestamp for a pod container.
-# TYPE kube_pod_container_state_started gauge
-# HELP kube_pod_completion_time Completion time in unix timestamp for a pod.
-# TYPE kube_pod_completion_time gauge
-# HELP kube_pod_owner Information about the Pod's owner.
-# TYPE kube_pod_owner gauge
-kube_pod_owner{namespace="default",pod="pod0",owner_kind="",owner_name="",owner_is_controller=""} 1
-# HELP kube_pod_labels Kubernetes labels converted to Prometheus labels.
-# TYPE kube_pod_labels gauge
-kube_pod_labels{namespace="default",pod="pod0"} 1
+# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state.
+# HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded.
+# HELP kube_pod_container_status_restarts_total The number of container restarts per container.
+# HELP kube_pod_container_status_running Describes whether the container is currently in running state.
+# HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state.
+# HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state.
+# HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state.
+# HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state.
# HELP kube_pod_created Unix creation timestamp
-# TYPE kube_pod_created gauge
-kube_pod_created{namespace="default",pod="pod0"} 1.5e+09
# HELP kube_pod_deletion_timestamp Unix deletion timestamp
-# TYPE kube_pod_deletion_timestamp gauge
+# HELP kube_pod_info Information about pod.
+# HELP kube_pod_init_container_info Information about an init container in a pod.
+# HELP kube_pod_init_container_resource_limits The number of requested limit resource by an init container.
+# HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
+# HELP kube_pod_init_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by an init container.
+# HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
+# HELP kube_pod_init_container_resource_limits_storage_bytes Bytes of storage requested limit by an init container.
+# HELP kube_pod_init_container_resource_requests The number of requested request resource by an init container.
+# HELP kube_pod_init_container_resource_requests_cpu_cores The number of CPU cores requested by an init container.
+# HELP kube_pod_init_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by an init container.
+# HELP kube_pod_init_container_resource_requests_memory_bytes Bytes of memory requested by an init container.
+# HELP kube_pod_init_container_resource_requests_storage_bytes Bytes of storage requested by an init container.
+# HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
+# HELP kube_pod_init_container_status_ready Describes whether the init containers readiness check succeeded.
+# HELP kube_pod_init_container_status_restarts_total The number of restarts for the init container.
+# HELP kube_pod_init_container_status_running Describes whether the init container is currently in running state.
+# HELP kube_pod_init_container_status_terminated Describes whether the init container is currently in terminated state.
+# HELP kube_pod_init_container_status_terminated_reason Describes the reason the init container is currently in terminated state.
+# HELP kube_pod_init_container_status_waiting Describes whether the init container is currently in waiting state.
+# HELP kube_pod_init_container_status_waiting_reason Describes the reason the init container is currently in waiting state.
+# HELP kube_pod_labels Kubernetes labels converted to Prometheus labels.
+# HELP kube_pod_overhead_cpu_cores The pod overhead in regards to cpu cores associated with running a pod.
+# HELP kube_pod_overhead_memory_bytes The pod overhead in regards to memory associated with running a pod.
+# HELP kube_pod_runtimeclass_name_info The runtimeclass associated with the pod.
+# HELP kube_pod_owner Information about the Pod's owner.
# HELP kube_pod_restart_policy Describes the restart policy in use by this pod.
-# TYPE kube_pod_restart_policy gauge
-kube_pod_restart_policy{namespace="default",pod="pod0",type="Always"} 1
-# HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status
-# TYPE kube_pod_status_scheduled_time gauge
-# HELP kube_pod_status_unschedulable Describes the unschedulable status for the pod.
-# TYPE kube_pod_status_unschedulable gauge
+# HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod.
+# HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only.
+# HELP kube_pod_start_time Start time in unix timestamp for a pod.
# HELP kube_pod_status_phase The pods current phase.
-# TYPE kube_pod_status_phase gauge
-kube_pod_status_phase{namespace="default",pod="pod0",phase="Pending"} 0
-kube_pod_status_phase{namespace="default",pod="pod0",phase="Succeeded"} 0
-kube_pod_status_phase{namespace="default",pod="pod0",phase="Failed"} 0
-kube_pod_status_phase{namespace="default",pod="pod0",phase="Unknown"} 0
-kube_pod_status_phase{namespace="default",pod="pod0",phase="Running"} 1
# HELP kube_pod_status_ready Describes whether the pod is ready to serve requests.
-# TYPE kube_pod_status_ready gauge
-# HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod.
-# TYPE kube_pod_status_scheduled gauge
# HELP kube_pod_status_reason The pod status reasons
-# TYPE kube_pod_status_reason gauge
-kube_pod_status_reason{namespace="default",pod="pod0",reason="NodeLost"} 0
-kube_pod_status_reason{namespace="default",pod="pod0",reason="Evicted"} 0
-kube_pod_status_reason{namespace="default",pod="pod0",reason="UnexpectedAdmissionError"} 0
-# HELP kube_pod_container_info Information about a container in a pod.
+# HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod.
+# HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status
+# HELP kube_pod_status_unschedulable Describes the unschedulable status for the pod.
+# TYPE kube_pod_completion_time gauge
# TYPE kube_pod_container_info gauge
-kube_pod_container_info{namespace="default",pod="pod0",container="container2",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",container_id="docker://cd456"} 1
-kube_pod_container_info{namespace="default",pod="pod0",container="container3",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",container_id="docker://ef789"} 1
-# HELP kube_pod_init_container_info Information about an init container in a pod.
-# TYPE kube_pod_init_container_info gauge
-# HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state.
+# TYPE kube_pod_container_resource_limits gauge
+# TYPE kube_pod_container_resource_requests gauge
+# TYPE kube_pod_container_state_started gauge
+# TYPE kube_pod_container_status_last_terminated_reason gauge
+# TYPE kube_pod_container_status_ready gauge
+# TYPE kube_pod_container_status_restarts_total counter
+# TYPE kube_pod_container_status_running gauge
+# TYPE kube_pod_container_status_terminated gauge
+# TYPE kube_pod_container_status_terminated_reason gauge
# TYPE kube_pod_container_status_waiting gauge
-kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container2"} 1
-kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container3"} 0
-# HELP kube_pod_init_container_status_waiting Describes whether the init container is currently in waiting state.
-# TYPE kube_pod_init_container_status_waiting gauge
-# HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state.
# TYPE kube_pod_container_status_waiting_reason gauge
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCreating"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CrashLoopBackOff"} 1
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CreateContainerConfigError"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ErrImagePull"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ImagePullBackOff"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CreateContainerError"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="InvalidImageName"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCreating"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CrashLoopBackOff"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CreateContainerConfigError"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ErrImagePull"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ImagePullBackOff"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CreateContainerError"} 0
-kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="InvalidImageName"} 0
-# HELP kube_pod_init_container_status_waiting_reason Describes the reason the init container is currently in waiting state.
-# TYPE kube_pod_init_container_status_waiting_reason gauge
-# HELP kube_pod_container_status_running Describes whether the container is currently in running state.
-# TYPE kube_pod_container_status_running gauge
-kube_pod_container_status_running{namespace="default",pod="pod0",container="container2"} 0
-kube_pod_container_status_running{namespace="default",pod="pod0",container="container3"} 0
-# HELP kube_pod_init_container_status_running Describes whether the init container is currently in running state.
+# TYPE kube_pod_created gauge
+# TYPE kube_pod_deletion_timestamp gauge
+# TYPE kube_pod_info gauge
+# TYPE kube_pod_init_container_info gauge
+# TYPE kube_pod_init_container_resource_limits gauge
+# TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
+# TYPE kube_pod_init_container_resource_limits_ephemeral_storage_bytes gauge
+# TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
+# TYPE kube_pod_init_container_resource_limits_storage_bytes gauge
+# TYPE kube_pod_init_container_resource_requests gauge
+# TYPE kube_pod_init_container_resource_requests_cpu_cores gauge
+# TYPE kube_pod_init_container_resource_requests_ephemeral_storage_bytes gauge
+# TYPE kube_pod_init_container_resource_requests_memory_bytes gauge
+# TYPE kube_pod_init_container_resource_requests_storage_bytes gauge
+# TYPE kube_pod_init_container_status_last_terminated_reason gauge
+# TYPE kube_pod_init_container_status_ready gauge
+# TYPE kube_pod_init_container_status_restarts_total counter
# TYPE kube_pod_init_container_status_running gauge
-# HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state.
-# TYPE kube_pod_container_status_terminated gauge
-kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container2"} 0
-kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container3"} 0
-# HELP kube_pod_init_container_status_terminated Describes whether the init container is currently in terminated state.
# TYPE kube_pod_init_container_status_terminated gauge
-# HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state.
-# TYPE kube_pod_container_status_terminated_reason gauge
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Completed"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCannotRun"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="DeadlineExceeded"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Evicted"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Completed"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCannotRun"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="DeadlineExceeded"} 0
-kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Evicted"} 0
-# HELP kube_pod_init_container_status_terminated_reason Describes the reason the init container is currently in terminated state.
# TYPE kube_pod_init_container_status_terminated_reason gauge
-# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state.
-# TYPE kube_pod_container_status_last_terminated_reason gauge
-kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 1
+# TYPE kube_pod_init_container_status_waiting gauge
+# TYPE kube_pod_init_container_status_waiting_reason gauge
+# TYPE kube_pod_labels gauge
+# TYPE kube_pod_overhead_cpu_cores gauge
+# TYPE kube_pod_overhead_memory_bytes gauge
+# TYPE kube_pod_runtimeclass_name_info gauge
+# TYPE kube_pod_owner gauge
+# TYPE kube_pod_restart_policy gauge
+# TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge
+# TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge
+# TYPE kube_pod_start_time gauge
+# TYPE kube_pod_status_phase gauge
+# TYPE kube_pod_status_ready gauge
+# TYPE kube_pod_status_reason gauge
+# TYPE kube_pod_status_scheduled gauge
+# TYPE kube_pod_status_scheduled_time gauge
+# TYPE kube_pod_status_unschedulable gauge
+kube_pod_container_info{namespace="default",pod="pod0",container="container2",image="k8s.gcr.io/hyperkube2",image_id="docker://sha256:bbb",container_id="docker://cd456"} 1
+kube_pod_container_info{namespace="default",pod="pod0",container="container3",image="k8s.gcr.io/hyperkube3",image_id="docker://sha256:ccc",container_id="docker://ef789"} 1
+kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",resource="nvidia_com_gpu",unit="integer"} 1
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="cpu",unit="core"} 0.2
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="ephemeral_storage",unit="byte"} 3e+08
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="memory",unit="byte"} 1e+08
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="nvidia_com_gpu",unit="integer"} 1
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",node="node1",resource="storage",unit="byte"} 4e+08
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="cpu",unit="core"} 0.3
+kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con2",node="node1",resource="memory",unit="byte"} 2e+08
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Completed"} 0
-kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCannotRun"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Evicted"} 0
-kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 1
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Completed"} 0
-kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCannotRun"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0
kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Evicted"} 0
-# HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
-# TYPE kube_pod_init_container_status_last_terminated_reason gauge
-# HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded.
-# TYPE kube_pod_container_status_ready gauge
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0
kube_pod_container_status_ready{namespace="default",pod="pod0",container="container2"} 0
kube_pod_container_status_ready{namespace="default",pod="pod0",container="container3"} 0
-# HELP kube_pod_init_container_status_ready Describes whether the init containers readiness check succeeded.
-# TYPE kube_pod_init_container_status_ready gauge
-# HELP kube_pod_container_status_restarts_total The number of container restarts per container.
-# TYPE kube_pod_container_status_restarts_total counter
kube_pod_container_status_restarts_total{namespace="default",pod="pod0",container="container2"} 0
kube_pod_container_status_restarts_total{namespace="default",pod="pod0",container="container3"} 0
-# HELP kube_pod_init_container_status_restarts_total The number of restarts for the init container.
-# TYPE kube_pod_init_container_status_restarts_total counter
-# HELP kube_pod_container_resource_requests_cpu_cores The number of CPU cores requested by a container.
-# TYPE kube_pod_container_resource_requests_cpu_cores gauge
-kube_pod_container_resource_requests_cpu_cores{namespace="default",pod="pod0",container="pod1_con1"} 0.2
-kube_pod_container_resource_requests_cpu_cores{namespace="default",pod="pod0",container="pod1_con2"} 0.3
-# HELP kube_pod_container_resource_requests_memory_bytes Bytes of memory requested by a container.
-# TYPE kube_pod_container_resource_requests_memory_bytes gauge
-kube_pod_container_resource_requests_memory_bytes{namespace="default",pod="pod0",container="pod1_con1"} 1e+08
-kube_pod_container_resource_requests_memory_bytes{namespace="default",pod="pod0",container="pod1_con2"} 2e+08
-# HELP kube_pod_container_resource_requests_storage_bytes Bytes of storage requested by a container.
-# TYPE kube_pod_container_resource_requests_storage_bytes gauge
-kube_pod_container_resource_requests_storage_bytes{namespace="default",pod="pod0",container="pod1_con1"} 4e+08
-# HELP kube_pod_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by a container.
-# TYPE kube_pod_container_resource_requests_ephemeral_storage_bytes gauge
-kube_pod_container_resource_requests_ephemeral_storage_bytes{namespace="default",pod="pod0",container="pod1_con1"} 3e+08
-# HELP kube_pod_container_resource_requests The number of requested request resource by a container.
-# TYPE kube_pod_container_resource_requests gauge
-kube_pod_container_resource_requests{namespace="default",pod="pod0",container="pod1_con1",resource="nvidia_com_gpu",unit="integer"} 1
-# HELP kube_pod_container_resource_limits_cpu_cores The number of CPU cores requested limit by a container.
-# TYPE kube_pod_container_resource_limits_cpu_cores gauge
-kube_pod_container_resource_limits_cpu_cores{namespace="default",pod="pod0",container="pod1_con1"} 0.2
-kube_pod_container_resource_limits_cpu_cores{namespace="default",pod="pod0",container="pod1_con2"} 0.3
-# HELP kube_pod_container_resource_limits_memory_bytes Bytes of memory requested limit by a container.
-# TYPE kube_pod_container_resource_limits_memory_bytes gauge
-kube_pod_container_resource_limits_memory_bytes{namespace="default",pod="pod0",container="pod1_con1"} 1e+08
-kube_pod_container_resource_limits_memory_bytes{namespace="default",pod="pod0",container="pod1_con2"} 2e+08
-# HELP kube_pod_container_resource_limits_storage_bytes Bytes of storage requested limit by a container.
-# TYPE kube_pod_container_resource_limits_storage_bytes gauge
-kube_pod_container_resource_limits_storage_bytes{namespace="default",pod="pod0",container="pod1_con1"} 4e+08
-# HELP kube_pod_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by a container.
-# TYPE kube_pod_container_resource_limits_ephemeral_storage_bytes gauge
-kube_pod_container_resource_limits_ephemeral_storage_bytes{namespace="default",pod="pod0",container="pod1_con1"} 3e+08
-# HELP kube_pod_container_resource_limits The number of requested limit resource by a container.
-# TYPE kube_pod_container_resource_limits gauge
-kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod1_con1",resource="nvidia_com_gpu",unit="integer"} 1
-# HELP kube_pod_init_container_resource_requests_cpu_cores The number of CPU cores requested by an init container.
-# TYPE kube_pod_init_container_resource_requests_cpu_cores gauge
-# HELP kube_pod_init_container_resource_requests_memory_bytes Bytes of memory requested by an init container.
-# TYPE kube_pod_init_container_resource_requests_memory_bytes gauge
-# HELP kube_pod_init_container_resource_requests_storage_bytes Bytes of storage requested by an init container.
-# TYPE kube_pod_init_container_resource_requests_storage_bytes gauge
-# HELP kube_pod_init_container_resource_requests_ephemeral_storage_bytes Bytes of ephemeral-storage requested by an init container.
-# TYPE kube_pod_init_container_resource_requests_ephemeral_storage_bytes gauge
-# HELP kube_pod_init_container_resource_requests The number of requested request resource by an init container.
-# TYPE kube_pod_init_container_resource_requests gauge
-# HELP kube_pod_init_container_resource_limits_cpu_cores The number of CPU cores requested limit by an init container.
-# TYPE kube_pod_init_container_resource_limits_cpu_cores gauge
-# HELP kube_pod_init_container_resource_limits_memory_bytes Bytes of memory requested limit by an init container.
-# TYPE kube_pod_init_container_resource_limits_memory_bytes gauge
-# HELP kube_pod_init_container_resource_limits_storage_bytes Bytes of storage requested limit by an init container.
-# TYPE kube_pod_init_container_resource_limits_storage_bytes gauge
-# HELP kube_pod_init_container_resource_limits_ephemeral_storage_bytes Bytes of ephemeral-storage requested limit by an init container.
-# TYPE kube_pod_init_container_resource_limits_ephemeral_storage_bytes gauge
-# HELP kube_pod_init_container_resource_limits The number of requested limit resource by an init container.
-# TYPE kube_pod_init_container_resource_limits gauge
-# HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod.
-# TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge
-# HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only.
-# TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge
-# HELP kube_pod_overhead_cpu_cores The pod overhead in regards to cpu cores associated with running a pod.
-# TYPE kube_pod_overhead_cpu_cores gauge
-# HELP kube_pod_overhead_memory_bytes The pod overhead in regards to memory associated with running a pod.
-# TYPE kube_pod_overhead_memory_bytes gauge
+kube_pod_container_status_running{namespace="default",pod="pod0",container="container2"} 0
+kube_pod_container_status_running{namespace="default",pod="pod0",container="container3"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="Evicted"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container2",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="Evicted"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="pod0",container="container3",reason="OOMKilled"} 0
+kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container2"} 0
+kube_pod_container_status_terminated{namespace="default",pod="pod0",container="container3"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CrashLoopBackOff"} 1
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container2",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="pod0",container="container3",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container2"} 1
+kube_pod_container_status_waiting{namespace="default",pod="pod0",container="container3"} 0
+kube_pod_created{namespace="default",pod="pod0"} 1.5e+09
+kube_pod_info{namespace="default",pod="pod0",host_ip="1.1.1.1",pod_ip="1.2.3.4",uid="abc-0",node="node1",created_by_kind="",created_by_name="",priority_class="",host_network="false"} 1
+kube_pod_labels{namespace="default",pod="pod0"} 1
+kube_pod_owner{namespace="default",pod="pod0",owner_kind="",owner_name="",owner_is_controller=""} 1
+kube_pod_restart_policy{namespace="default",pod="pod0",type="Always"} 1
+kube_pod_status_phase{namespace="default",pod="pod0",phase="Failed"} 0
+kube_pod_status_phase{namespace="default",pod="pod0",phase="Pending"} 0
+kube_pod_status_phase{namespace="default",pod="pod0",phase="Running"} 1
+kube_pod_status_phase{namespace="default",pod="pod0",phase="Succeeded"} 0
+kube_pod_status_phase{namespace="default",pod="pod0",phase="Unknown"} 0
+kube_pod_status_reason{namespace="default",pod="pod0",reason="Evicted"} 0
+kube_pod_status_reason{namespace="default",pod="pod0",reason="NodeLost"} 0
+kube_pod_status_reason{namespace="default",pod="pod0",reason="UnexpectedAdmissionError"} 0
`
expectedSplit := strings.Split(strings.TrimSpace(expected), "\n")
@@ -371,12 +352,12 @@ kube_pod_container_resource_limits{namespace="default",pod="pod0",container="pod
sort.Strings(gotFiltered)
if len(expectedSplit) != len(gotFiltered) {
- t.Fatalf("expected different output length, expected \n\n%s\n\ngot\n\n%s", expected, string(body))
+		t.Fatalf("expected %d lines of output, got %d; expected \n\n%s\n\ngot\n\n%s", len(expectedSplit), len(gotFiltered), expected, strings.Join(gotFiltered, "\n"))
}
for i := 0; i < len(expectedSplit); i++ {
if expectedSplit[i] != gotFiltered[i] {
- t.Fatalf("expected:\n\n%v, but got:\n\n%v", expectedSplit[i], gotFiltered[i])
+ t.Fatalf("expected:\n\n%v\n, but got:\n\n%v", expectedSplit[i], gotFiltered[i])
}
}
}
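
For readers skimming the test change above: the comparison amounts to trimming, splitting, and sorting both the expected and the scraped exposition output, then diffing line by line, so that metric ordering cannot cause a false failure. A minimal standalone sketch of that idea follows; the helper name diffMetricLines is illustrative and not part of the repository.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// diffMetricLines compares two blobs of Prometheus exposition output line by
// line after sorting, so differences in metric ordering are ignored. It
// returns the first mismatching pair, or ok=true when the outputs agree.
func diffMetricLines(expected, got string) (want, have string, ok bool) {
	e := strings.Split(strings.TrimSpace(expected), "\n")
	g := strings.Split(strings.TrimSpace(got), "\n")
	sort.Strings(e)
	sort.Strings(g)
	if len(e) != len(g) {
		return fmt.Sprintf("%d lines", len(e)), fmt.Sprintf("%d lines", len(g)), false
	}
	for i := range e {
		if e[i] != g[i] {
			return e[i], g[i], false
		}
	}
	return "", "", true
}

func main() {
	if _, _, ok := diffMetricLines("a 1\nb 2", "b 2\na 1"); ok {
		fmt.Println("outputs match after sorting")
	}
}
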
diff --git a/pkg/allow/allow_labels.go b/pkg/allow/allow_labels.go
index 4ac4116ecb..6051d14fb4 100644
--- a/pkg/allow/allow_labels.go
+++ b/pkg/allow/allow_labels.go
@@ -49,7 +49,7 @@ func (a Labels) Allowed(metric string, labels, values []string) ([]string, []str
var finalLabels, finalValues []string
labelSet := labelSet(allowedLabels)
- for allowedLabel := range labelSet {
+ for _, allowedLabel := range labelSet {
for i, label := range labels {
if label == allowedLabel {
finalLabels = append(finalLabels, label)
@@ -61,12 +61,16 @@ func (a Labels) Allowed(metric string, labels, values []string) ([]string, []str
return finalLabels, finalValues
}
-func labelSet(lists ...[]string) map[string]interface{} {
+func labelSet(lists ...[]string) []string {
m := make(map[string]interface{})
+ var set []string
for _, list := range lists {
for _, e := range list {
- m[e] = struct{}{}
+ if _, ok := m[e]; !ok {
+ m[e] = struct{}{}
+ set = append(set, e)
+ }
}
}
- return m
+ return set
}
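
The allow_labels.go change above swaps the map-backed set for a slice so that label iteration order stays deterministic: ranging over a Go map yields a different key order on every run, whereas a slice preserves insertion order. A self-contained sketch of the same order-preserving deduplication (the function name orderedSet is illustrative, not the package's API):

package main

import "fmt"

// orderedSet deduplicates the given lists while preserving the order in which
// elements are first seen. A plain map would dedupe just as well, but its
// iteration order is randomized by the Go runtime.
func orderedSet(lists ...[]string) []string {
	seen := make(map[string]struct{})
	var set []string
	for _, list := range lists {
		for _, e := range list {
			if _, ok := seen[e]; !ok {
				seen[e] = struct{}{}
				set = append(set, e)
			}
		}
	}
	return set
}

func main() {
	fmt.Println(orderedSet([]string{"app", "team", "app"}, []string{"team", "env"}))
	// prints [app team env], and does so identically on every run
}
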
diff --git a/scripts/jsonnetfile.json b/scripts/jsonnetfile.json
index 5f706924e2..4d38c94e91 100644
--- a/scripts/jsonnetfile.json
+++ b/scripts/jsonnetfile.json
@@ -18,5 +18,5 @@
"version": ""
}
],
- "legacyImports": true
+ "legacyImports": false
}
diff --git a/scripts/jsonnetfile.lock.json b/scripts/jsonnetfile.lock.json
index f9be92fd96..4d38c94e91 100644
--- a/scripts/jsonnetfile.lock.json
+++ b/scripts/jsonnetfile.lock.json
@@ -1,17 +1,6 @@
{
"version": 1,
"dependencies": [
- {
- "source": {
- "git": {
- "remote": "https://github.com/ksonnet/ksonnet-lib.git",
- "subdir": ""
- }
- },
- "version": "0d2f82676817bbf9e4acf6495b2090205f323b9f",
- "sum": "h28BXZ7+vczxYJ2sCt8JuR9+yznRtU/iA6DCpQUrtEg=",
- "name": "ksonnet"
- },
{
"source": {
"local": {
diff --git a/tests/manifests/csr.yaml b/tests/manifests/csr.yaml
index 9f6a718633..f09ae6d801 100644
--- a/tests/manifests/csr.yaml
+++ b/tests/manifests/csr.yaml
@@ -1,4 +1,4 @@
-apiVersion: certificates.k8s.io/v1beta1
+apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: my-svc.my-namespace
@@ -7,6 +7,7 @@ spec:
- system:masters
- system:authenticated
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQllqQ0NBUWdDQVFBd01ERXVNQ3dHQTFVRUF4TWxiWGt0Y0c5a0xtMTVMVzVoYldWemNHRmpaUzV3YjJRdQpZMngxYzNSbGNpNXNiMk5oYkRCWk1CTUdCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQkpHai9CazNkTVNjCmNmQWdqbWk2d2wxdVYrQVNIR1g1ZHluWHFWdmJsaUd4clFBL2FFOWY0NDc5eFpVR0lDNjFPSmwrR0JJVGhBV0cKWlFiTEhDQ0xscXVnZGpCMEJna3Foa2lHOXcwQkNRNHhaekJsTUdNR0ExVWRFUVJjTUZxQ0pXMTVMWE4yWXk1dAplUzF1WVcxbGMzQmhZMlV1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDSlcxNUxYQnZaQzV0ZVMxdVlXMWxjM0JoClkyVXVjRzlrTG1Oc2RYTjBaWEl1Ykc5allXeUhCS3lvQUJpSEJBb0FJZ0l3Q2dZSUtvWkl6ajBFQXdJRFNBQXcKUlFJZ1psb0J6Vkp4UkpjeUlweHZ1WGhTWFRhM3lPaXJDVVRCZytqQk5DUUcyT29DSVFDQVV6c2IzYWxuV1ljdAp5eGxEVEgxZkF6dms3R0ZINVVhd3RwaitWREFJNHc9PQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K
+ signerName: kubernetes.io/kube-apiserver-client
usages:
- digital signature
- key encipherment
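
The certificates.k8s.io/v1 API behind the csr.yaml update above makes spec.signerName a required field, which is why the manifest gains it; kubernetes.io/kube-apiserver-client is one of the built-in signers. For anyone building the same object from Go with the upstream API types, a rough sketch follows; the request bytes are a placeholder and the usages shown are typical client-auth values, not necessarily the manifest's exact list.

package main

import (
	"fmt"

	certv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	csr := &certv1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "my-svc.my-namespace"},
		Spec: certv1.CertificateSigningRequestSpec{
			// Placeholder: in practice this is the raw PEM-encoded CSR.
			Request: []byte("-----BEGIN CERTIFICATE REQUEST-----..."),
			// Required in certificates.k8s.io/v1 (it was optional in v1beta1).
			SignerName: "kubernetes.io/kube-apiserver-client",
			Usages: []certv1.KeyUsage{
				certv1.UsageDigitalSignature,
				certv1.UsageKeyEncipherment,
				certv1.UsageClientAuth,
			},
		},
	}
	fmt.Println(csr.Spec.SignerName)
}
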
diff --git a/tests/manifests/ingress.yaml b/tests/manifests/ingress.yaml
index d0fd3e6b9f..05428277e1 100644
--- a/tests/manifests/ingress.yaml
+++ b/tests/manifests/ingress.yaml
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: example-ingress
@@ -9,10 +9,16 @@ spec:
- http:
paths:
- path: /apple
+ pathType: Prefix
backend:
- serviceName: apple-service
- servicePort: 5678
+ service:
+ name: apple-service
+ port:
+ number: 5678
- path: /banana
+ pathType: Prefix
backend:
- serviceName: banana-service
- servicePort: 5678
+ service:
+ name: banana-service
+ port:
+ number: 5678
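
The networking.k8s.io/v1 Ingress used above restructures the backend reference: the flat serviceName/servicePort pair becomes a nested service object, and pathType is now required on each path. A sketch of the first rule from the manifest expressed with the Go API types (illustrative only, not code from the repository):

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// pathType is mandatory in networking.k8s.io/v1; Prefix matches the manifest.
	pathType := networkingv1.PathTypePrefix

	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "example-ingress"},
		Spec: networkingv1.IngressSpec{
			Rules: []networkingv1.IngressRule{{
				IngressRuleValue: networkingv1.IngressRuleValue{
					HTTP: &networkingv1.HTTPIngressRuleValue{
						Paths: []networkingv1.HTTPIngressPath{{
							Path:     "/apple",
							PathType: &pathType,
							Backend: networkingv1.IngressBackend{
								// serviceName/servicePort from extensions/v1beta1
								// become a nested service reference here.
								Service: &networkingv1.IngressServiceBackend{
									Name: "apple-service",
									Port: networkingv1.ServiceBackendPort{Number: 5678},
								},
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println(ing.Spec.Rules[0].HTTP.Paths[0].Backend.Service.Name)
}
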