Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fixed four resources with copy/paste issues #1580

Merged
merged 1 commit into from
Sep 20, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion internal/store/cronjob.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ func cronJobMetricFamilies(allowAnnotationsList, allowLabelsList []string) []gen
metric.Gauge,
"",
wrapCronJobFunc(func(j *batchv1beta1.CronJob) *metric.Family {
annotationKeys, annotationValues := createPrometheusLabelKeysValues("label", j.Annotations, allowLabelsList)
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", j.Annotations, allowAnnotationsList)
return &metric.Family{
Metrics: []*metric.Metric{
{
Expand Down
29 changes: 26 additions & 3 deletions internal/store/cronjob_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,9 @@ func TestCronJobStore(t *testing.T) {

cases := []generateMetricsTestCase{
{
AllowAnnotationsList: []string{
"app.k8s.io/owner",
},
Obj: &batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "ActiveRunningCronJob1",
Expand All @@ -112,6 +115,10 @@ func TestCronJobStore(t *testing.T) {
Labels: map[string]string{
"app": "example-active-running-1",
},
Annotations: map[string]string{
"app": "mysql-server",
"app.k8s.io/owner": "@foo",
},
},
Status: batchv1beta1.CronJobStatus{
Active: []v1.ObjectReference{{Name: "FakeJob1"}, {Name: "FakeJob2"}},
Expand All @@ -129,6 +136,7 @@ func TestCronJobStore(t *testing.T) {
Want: `
# HELP kube_cronjob_created Unix creation timestamp
# HELP kube_cronjob_info Info about cronjob.
# HELP kube_cronjob_annotations Kubernetes annotations converted to Prometheus labels.
# HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels.
# HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed.
# HELP kube_cronjob_spec_failed_job_history_limit Failed job history limit tells the controller how many failed jobs should be preserved.
Expand All @@ -140,6 +148,7 @@ func TestCronJobStore(t *testing.T) {
# HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled.
# TYPE kube_cronjob_created gauge
# TYPE kube_cronjob_info gauge
# TYPE kube_cronjob_annotations gauge
# TYPE kube_cronjob_labels gauge
# TYPE kube_cronjob_next_schedule_time gauge
# TYPE kube_cronjob_spec_failed_job_history_limit gauge
Expand All @@ -150,6 +159,7 @@ func TestCronJobStore(t *testing.T) {
# TYPE kube_cronjob_metadata_resource_version gauge
# TYPE kube_cronjob_status_last_schedule_time gauge
kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveRunningCronJob1",namespace="ns1",schedule="0 */6 * * *"} 1
kube_cronjob_annotations{annotation_app_k8s_io_owner="@foo",cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
kube_cronjob_labels{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
kube_cronjob_spec_failed_job_history_limit{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveRunningCronJob1",namespace="ns1"} 300
Expand All @@ -160,7 +170,20 @@ func TestCronJobStore(t *testing.T) {
kube_cronjob_status_last_schedule_time{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1.520742896e+09
` + fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveRunningCronJob1\",namespace=\"ns1\"} %ve+09\n",
float64(ActiveRunningCronJob1NextScheduleTime.Unix())/math.Pow10(9)),
MetricNames: []string{"kube_cronjob_next_schedule_time", "kube_cronjob_spec_starting_deadline_seconds", "kube_cronjob_status_active", "kube_cronjob_metadata_resource_version", "kube_cronjob_spec_suspend", "kube_cronjob_info", "kube_cronjob_created", "kube_cronjob_labels", "kube_cronjob_status_last_schedule_time", "kube_cronjob_spec_successful_job_history_limit", "kube_cronjob_spec_failed_job_history_limit"},
MetricNames: []string{
"kube_cronjob_next_schedule_time",
"kube_cronjob_spec_starting_deadline_seconds",
"kube_cronjob_status_active",
"kube_cronjob_metadata_resource_version",
"kube_cronjob_spec_suspend",
"kube_cronjob_info",
"kube_cronjob_created",
"kube_cronjob_annotations",
"kube_cronjob_labels",
"kube_cronjob_status_last_schedule_time",
"kube_cronjob_spec_successful_job_history_limit",
"kube_cronjob_spec_failed_job_history_limit",
},
},
{
Obj: &batchv1beta1.CronJob{
Expand Down Expand Up @@ -281,8 +304,8 @@ func TestCronJobStore(t *testing.T) {
},
}
for i, c := range cases {
c.Func = generator.ComposeMetricGenFuncs(cronJobMetricFamilies(nil, nil))
c.Headers = generator.ExtractMetricFamilyHeaders(cronJobMetricFamilies(nil, nil))
c.Func = generator.ComposeMetricGenFuncs(cronJobMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
c.Headers = generator.ExtractMetricFamilyHeaders(cronJobMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
if err := c.run(); err != nil {
t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
}
Expand Down
2 changes: 1 addition & 1 deletion internal/store/daemonset.go
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ func daemonSetMetricFamilies(allowAnnotationsList, allowLabelsList []string) []g
metric.Gauge,
"",
wrapDaemonSetFunc(func(d *v1.DaemonSet) *metric.Family {
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", d.Annotations, allowLabelsList)
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", d.Annotations, allowAnnotationsList)
return &metric.Family{
Metrics: []*metric.Metric{
{
Expand Down
15 changes: 13 additions & 2 deletions internal/store/daemonset_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,20 @@ import (
func TestDaemonSetStore(t *testing.T) {
cases := []generateMetricsTestCase{
{
AllowAnnotationsList: []string{
"app.k8s.io/owner",
},
Obj: &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: "ds1",
Namespace: "ns1",
Labels: map[string]string{
"app": "example1",
},
Annotations: map[string]string{
"app": "mysql-server",
"app.k8s.io/owner": "@foo",
},
Generation: 21,
},
Status: v1.DaemonSetStatus{
Expand All @@ -47,6 +54,7 @@ func TestDaemonSetStore(t *testing.T) {
},
},
Want: `
# HELP kube_daemonset_annotations Kubernetes annotations converted to Prometheus labels.
# HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels.
# HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state.
# HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to.
Expand All @@ -57,6 +65,7 @@ func TestDaemonSetStore(t *testing.T) {
# HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available
# HELP kube_daemonset_status_observed_generation The most recent generation observed by the daemon set controller.
# HELP kube_daemonset_status_updated_number_scheduled The total number of nodes that are running updated daemon pod
# TYPE kube_daemonset_annotations gauge
# TYPE kube_daemonset_labels gauge
# TYPE kube_daemonset_metadata_generation gauge
# TYPE kube_daemonset_status_current_number_scheduled gauge
Expand All @@ -76,9 +85,11 @@ func TestDaemonSetStore(t *testing.T) {
kube_daemonset_status_number_unavailable{daemonset="ds1",namespace="ns1"} 0
kube_daemonset_status_observed_generation{daemonset="ds1",namespace="ns1"} 2
kube_daemonset_status_updated_number_scheduled{daemonset="ds1",namespace="ns1"} 0
kube_daemonset_annotations{annotation_app_k8s_io_owner="@foo",daemonset="ds1",namespace="ns1"} 1
kube_daemonset_labels{daemonset="ds1",namespace="ns1"} 1
`,
MetricNames: []string{
"kube_daemonset_annotations",
"kube_daemonset_labels",
"kube_daemonset_metadata_generation",
"kube_daemonset_status_current_number_scheduled",
Expand Down Expand Up @@ -222,8 +233,8 @@ func TestDaemonSetStore(t *testing.T) {
},
}
for i, c := range cases {
c.Func = generator.ComposeMetricGenFuncs(daemonSetMetricFamilies(nil, nil))
c.Headers = generator.ExtractMetricFamilyHeaders(daemonSetMetricFamilies(nil, nil))
c.Func = generator.ComposeMetricGenFuncs(daemonSetMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
c.Headers = generator.ExtractMetricFamilyHeaders(daemonSetMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
if err := c.run(); err != nil {
t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
}
Expand Down
2 changes: 1 addition & 1 deletion internal/store/horizontalpodautoscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ func hpaMetricFamilies(allowAnnotationsList, allowLabelsList []string) []generat
metric.Gauge,
"",
wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) *metric.Family {
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", a.Annotations, allowLabelsList)
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", a.Annotations, allowAnnotationsList)
return &metric.Family{
Metrics: []*metric.Metric{
{
Expand Down
17 changes: 15 additions & 2 deletions internal/store/horizontalpodautoscaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ func TestHPAStore(t *testing.T) {
// Fixed metadata on type and help text. We prepend this to every expected
// output so we only have to modify a single place when doing adjustments.
const metadata = `
# HELP kube_horizontalpodautoscaler_annotations Kubernetes annotations converted to Prometheus labels.
# HELP kube_horizontalpodautoscaler_labels Kubernetes labels converted to Prometheus labels.
# HELP kube_horizontalpodautoscaler_metadata_generation The generation observed by the HorizontalPodAutoscaler controller.
# HELP kube_horizontalpodautoscaler_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
Expand All @@ -43,6 +44,7 @@ func TestHPAStore(t *testing.T) {
# HELP kube_horizontalpodautoscaler_status_condition The condition of this autoscaler.
# HELP kube_horizontalpodautoscaler_status_current_replicas Current number of replicas of pods managed by this autoscaler.
# HELP kube_horizontalpodautoscaler_status_desired_replicas Desired number of replicas of pods managed by this autoscaler.
# TYPE kube_horizontalpodautoscaler_annotations gauge
# TYPE kube_horizontalpodautoscaler_labels gauge
# TYPE kube_horizontalpodautoscaler_metadata_generation gauge
# TYPE kube_horizontalpodautoscaler_spec_max_replicas gauge
Expand Down Expand Up @@ -181,6 +183,7 @@ func TestHPAStore(t *testing.T) {
},
},
Want: metadata + `
kube_horizontalpodautoscaler_annotations{horizontalpodautoscaler="hpa1",namespace="ns1"} 1
kube_horizontalpodautoscaler_labels{horizontalpodautoscaler="hpa1",namespace="ns1"} 1
kube_horizontalpodautoscaler_metadata_generation{horizontalpodautoscaler="hpa1",namespace="ns1"} 2
kube_horizontalpodautoscaler_spec_max_replicas{horizontalpodautoscaler="hpa1",namespace="ns1"} 4
Expand All @@ -207,11 +210,15 @@ func TestHPAStore(t *testing.T) {
"kube_horizontalpodautoscaler_status_current_replicas",
"kube_horizontalpodautoscaler_status_desired_replicas",
"kube_horizontalpodautoscaler_status_condition",
"kube_horizontalpodautoscaler_annotations",
"kube_horizontalpodautoscaler_labels",
},
},
{
// Verify populating base metric.
AllowAnnotationsList: []string{
"app.k8s.io/owner",
},
Obj: &autoscaling.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Generation: 2,
Expand All @@ -220,6 +227,10 @@ func TestHPAStore(t *testing.T) {
Labels: map[string]string{
"app": "foobar",
},
Annotations: map[string]string{
"app": "mysql-server",
"app.k8s.io/owner": "@foo",
},
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
MaxReplicas: 4,
Expand Down Expand Up @@ -330,6 +341,7 @@ func TestHPAStore(t *testing.T) {
},
},
Want: metadata + `
kube_horizontalpodautoscaler_annotations{annotation_app_k8s_io_owner="@foo",horizontalpodautoscaler="hpa2",namespace="ns1"} 1
kube_horizontalpodautoscaler_labels{horizontalpodautoscaler="hpa2",namespace="ns1"} 1
kube_horizontalpodautoscaler_metadata_generation{horizontalpodautoscaler="hpa2",namespace="ns1"} 2
kube_horizontalpodautoscaler_spec_max_replicas{horizontalpodautoscaler="hpa2",namespace="ns1"} 4
Expand All @@ -352,13 +364,14 @@ func TestHPAStore(t *testing.T) {
"kube_horizontalpodautoscaler_status_current_replicas",
"kube_horizontalpodautoscaler_status_desired_replicas",
"kube_horizontalpodautoscaler_status_condition",
"kube_horizontalpodautoscaler_annotations",
"kube_horizontalpodautoscaler_labels",
},
},
}
for i, c := range cases {
c.Func = generator.ComposeMetricGenFuncs(hpaMetricFamilies(nil, nil))
c.Headers = generator.ExtractMetricFamilyHeaders(hpaMetricFamilies(nil, nil))
c.Func = generator.ComposeMetricGenFuncs(hpaMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
c.Headers = generator.ExtractMetricFamilyHeaders(hpaMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
if err := c.run(); err != nil {
t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
}
Expand Down
2 changes: 1 addition & 1 deletion internal/store/ingress.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ func ingressMetricFamilies(allowAnnotationsList, allowLabelsList []string) []gen
metric.Gauge,
"",
wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", i.Annotations, allowLabelsList)
annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", i.Annotations, allowAnnotationsList)
return &metric.Family{
Metrics: []*metric.Metric{
{
Expand Down
29 changes: 25 additions & 4 deletions internal/store/ingress_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,19 +47,40 @@ func TestIngressStore(t *testing.T) {
`
cases := []generateMetricsTestCase{
{
AllowAnnotationsList: []string{
"app.k8s.io/owner",
},
Obj: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress1",
Namespace: "ns1",
ResourceVersion: "000000",
Annotations: map[string]string{
"app": "mysql-server",
"app.k8s.io/owner": "@foo",
},
},
},
Want: metadata + `
Want: `
# HELP kube_ingress_info Information about ingress.
# HELP kube_ingress_annotations Kubernetes annotations converted to Prometheus labels.
# HELP kube_ingress_labels Kubernetes labels converted to Prometheus labels.
# HELP kube_ingress_metadata_resource_version Resource version representing a specific version of ingress.
# TYPE kube_ingress_info gauge
# TYPE kube_ingress_annotations gauge
# TYPE kube_ingress_labels gauge
# TYPE kube_ingress_metadata_resource_version gauge
kube_ingress_info{namespace="ns1",ingress="ingress1"} 1
kube_ingress_metadata_resource_version{namespace="ns1",ingress="ingress1"} 0
kube_ingress_annotations{annotation_app_k8s_io_owner="@foo",namespace="ns1",ingress="ingress1"} 1
kube_ingress_labels{namespace="ns1",ingress="ingress1"} 1
`,
MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
MetricNames: []string{
"kube_ingress_info",
"kube_ingress_metadata_resource_version",
"kube_ingress_annotations",
"kube_ingress_labels",
},
},
{
Obj: &networkingv1.Ingress{
Expand Down Expand Up @@ -169,8 +190,8 @@ func TestIngressStore(t *testing.T) {
},
}
for i, c := range cases {
c.Func = generator.ComposeMetricGenFuncs(ingressMetricFamilies(nil, nil))
c.Headers = generator.ExtractMetricFamilyHeaders(ingressMetricFamilies(nil, nil))
c.Func = generator.ComposeMetricGenFuncs(ingressMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
c.Headers = generator.ExtractMetricFamilyHeaders(ingressMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
if err := c.run(); err != nil {
t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
}
Expand Down