From 6512e2ce2727500696bbee04e3b0629f2c50b32b Mon Sep 17 00:00:00 2001
From: Gabi Davar
Date: Mon, 20 Sep 2021 18:02:56 +0300
Subject: [PATCH] fixed four resources with copy/paste issues

The annotations metric generators for CronJob, DaemonSet,
HorizontalPodAutoscaler and Ingress were copy/pasted from the labels
generators: kube_cronjob_annotations still passed the "label" prefix and
the labels allow list to createPrometheusLabelKeysValues, and the other
three passed allowLabelsList instead of allowAnnotationsList. Pass the
"annotation" prefix and the annotations allow list in all four, and
build the metric families in the tests from each case's
AllowAnnotationsList/AllowLabelsList so the annotations metrics are
actually exercised.

Signed-off-by: Gabi Davar
---
 internal/store/cronjob.go                     |  2 +-
 internal/store/cronjob_test.go                | 29 ++++++++++++++++++---
 internal/store/daemonset.go                   |  2 +-
 internal/store/daemonset_test.go              | 15 +++++++++--
 internal/store/horizontalpodautoscaler.go     |  2 +-
 .../store/horizontalpodautoscaler_test.go     | 17 ++++++++++---
 internal/store/ingress.go                     |  2 +-
 internal/store/ingress_test.go                | 29 +++++++++++++++++----
 8 files changed, 83 insertions(+), 15 deletions(-)

diff --git a/internal/store/cronjob.go b/internal/store/cronjob.go
index 636bc48032..30db5309bd 100644
--- a/internal/store/cronjob.go
+++ b/internal/store/cronjob.go
@@ -49,7 +49,7 @@ func cronJobMetricFamilies(allowAnnotationsList, allowLabelsList []string) []gen
 		metric.Gauge,
 		"",
 		wrapCronJobFunc(func(j *batchv1beta1.CronJob) *metric.Family {
-			annotationKeys, annotationValues := createPrometheusLabelKeysValues("label", j.Annotations, allowLabelsList)
+			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", j.Annotations, allowAnnotationsList)
 			return &metric.Family{
 				Metrics: []*metric.Metric{
 					{
diff --git a/internal/store/cronjob_test.go b/internal/store/cronjob_test.go
index ac41f8165f..0acd330772 100644
--- a/internal/store/cronjob_test.go
+++ b/internal/store/cronjob_test.go
@@ -103,6 +103,9 @@ func TestCronJobStore(t *testing.T) {
 
 	cases := []generateMetricsTestCase{
 		{
+			AllowAnnotationsList: []string{
+				"app.k8s.io/owner",
+			},
 			Obj: &batchv1beta1.CronJob{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "ActiveRunningCronJob1",
@@ -112,6 +115,10 @@ func TestCronJobStore(t *testing.T) {
 					Labels: map[string]string{
 						"app": "example-active-running-1",
 					},
+					Annotations: map[string]string{
+						"app":              "mysql-server",
+						"app.k8s.io/owner": "@foo",
+					},
 				},
 				Status: batchv1beta1.CronJobStatus{
 					Active: []v1.ObjectReference{{Name: "FakeJob1"}, {Name: "FakeJob2"}},
@@ -129,6 +136,7 @@ func TestCronJobStore(t *testing.T) {
 			Want: `
 				# HELP kube_cronjob_created Unix creation timestamp
 				# HELP kube_cronjob_info Info about cronjob.
+				# HELP kube_cronjob_annotations Kubernetes annotations converted to Prometheus labels.
 				# HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels.
 				# HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed.
 				# HELP kube_cronjob_spec_failed_job_history_limit Failed job history limit tells the controller how many failed jobs should be preserved.
@@ -140,6 +148,7 @@ func TestCronJobStore(t *testing.T) {
 				# HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled.
 				# TYPE kube_cronjob_created gauge
 				# TYPE kube_cronjob_info gauge
+				# TYPE kube_cronjob_annotations gauge
 				# TYPE kube_cronjob_labels gauge
 				# TYPE kube_cronjob_next_schedule_time gauge
 				# TYPE kube_cronjob_spec_failed_job_history_limit gauge
@@ -150,6 +159,7 @@ func TestCronJobStore(t *testing.T) {
 				# TYPE kube_cronjob_metadata_resource_version gauge
 				# TYPE kube_cronjob_status_last_schedule_time gauge
 				kube_cronjob_info{concurrency_policy="Forbid",cronjob="ActiveRunningCronJob1",namespace="ns1",schedule="0 */6 * * *"} 1
+				kube_cronjob_annotations{annotation_app_k8s_io_owner="@foo",cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
 				kube_cronjob_labels{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
 				kube_cronjob_spec_failed_job_history_limit{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1
 				kube_cronjob_spec_starting_deadline_seconds{cronjob="ActiveRunningCronJob1",namespace="ns1"} 300
@@ -160,7 +170,20 @@ func TestCronJobStore(t *testing.T) {
 				kube_cronjob_status_last_schedule_time{cronjob="ActiveRunningCronJob1",namespace="ns1"} 1.520742896e+09
 			` + fmt.Sprintf("kube_cronjob_next_schedule_time{cronjob=\"ActiveRunningCronJob1\",namespace=\"ns1\"} %ve+09\n",
 				float64(ActiveRunningCronJob1NextScheduleTime.Unix())/math.Pow10(9)),
-			MetricNames: []string{"kube_cronjob_next_schedule_time", "kube_cronjob_spec_starting_deadline_seconds", "kube_cronjob_status_active", "kube_cronjob_metadata_resource_version", "kube_cronjob_spec_suspend", "kube_cronjob_info", "kube_cronjob_created", "kube_cronjob_labels", "kube_cronjob_status_last_schedule_time", "kube_cronjob_spec_successful_job_history_limit", "kube_cronjob_spec_failed_job_history_limit"},
+			MetricNames: []string{
+				"kube_cronjob_next_schedule_time",
+				"kube_cronjob_spec_starting_deadline_seconds",
+				"kube_cronjob_status_active",
+				"kube_cronjob_metadata_resource_version",
+				"kube_cronjob_spec_suspend",
+				"kube_cronjob_info",
+				"kube_cronjob_created",
+				"kube_cronjob_annotations",
+				"kube_cronjob_labels",
+				"kube_cronjob_status_last_schedule_time",
+				"kube_cronjob_spec_successful_job_history_limit",
+				"kube_cronjob_spec_failed_job_history_limit",
+			},
 		},
 		{
 			Obj: &batchv1beta1.CronJob{
@@ -281,8 +304,8 @@ func TestCronJobStore(t *testing.T) {
 		},
 	}
 	for i, c := range cases {
-		c.Func = generator.ComposeMetricGenFuncs(cronJobMetricFamilies(nil, nil))
-		c.Headers = generator.ExtractMetricFamilyHeaders(cronJobMetricFamilies(nil, nil))
+		c.Func = generator.ComposeMetricGenFuncs(cronJobMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
+		c.Headers = generator.ExtractMetricFamilyHeaders(cronJobMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
 		if err := c.run(); err != nil {
 			t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
 		}
diff --git a/internal/store/daemonset.go b/internal/store/daemonset.go
index 07e3a38f1f..e9fd4b42c4 100644
--- a/internal/store/daemonset.go
+++ b/internal/store/daemonset.go
@@ -218,7 +218,7 @@ func daemonSetMetricFamilies(allowAnnotationsList, allowLabelsList []string) []g
 		metric.Gauge,
 		"",
 		wrapDaemonSetFunc(func(d *v1.DaemonSet) *metric.Family {
-			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", d.Annotations, allowLabelsList)
+			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", d.Annotations, allowAnnotationsList)
 			return &metric.Family{
 				Metrics: []*metric.Metric{
 					{
diff --git a/internal/store/daemonset_test.go b/internal/store/daemonset_test.go
index 5a9bb2faa0..e7cea91e79 100644
--- a/internal/store/daemonset_test.go
+++ b/internal/store/daemonset_test.go
@@ -29,6 +29,9 @@ import (
 func TestDaemonSetStore(t *testing.T) {
 	cases := []generateMetricsTestCase{
 		{
+			AllowAnnotationsList: []string{
+				"app.k8s.io/owner",
+			},
 			Obj: &v1.DaemonSet{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "ds1",
@@ -36,6 +39,10 @@ func TestDaemonSetStore(t *testing.T) {
 					Labels: map[string]string{
 						"app": "example1",
 					},
+					Annotations: map[string]string{
+						"app":              "mysql-server",
+						"app.k8s.io/owner": "@foo",
+					},
 					Generation: 21,
 				},
 				Status: v1.DaemonSetStatus{
@@ -47,6 +54,7 @@ func TestDaemonSetStore(t *testing.T) {
 				},
 			},
 			Want: `
+				# HELP kube_daemonset_annotations Kubernetes annotations converted to Prometheus labels.
 				# HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels.
 				# HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state.
 				# HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to.
@@ -57,6 +65,7 @@ func TestDaemonSetStore(t *testing.T) {
 				# HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available
 				# HELP kube_daemonset_status_observed_generation The most recent generation observed by the daemon set controller.
 				# HELP kube_daemonset_status_updated_number_scheduled The total number of nodes that are running updated daemon pod
+				# TYPE kube_daemonset_annotations gauge
 				# TYPE kube_daemonset_labels gauge
 				# TYPE kube_daemonset_metadata_generation gauge
 				# TYPE kube_daemonset_status_current_number_scheduled gauge
@@ -76,9 +85,11 @@ func TestDaemonSetStore(t *testing.T) {
 				kube_daemonset_status_number_unavailable{daemonset="ds1",namespace="ns1"} 0
 				kube_daemonset_status_observed_generation{daemonset="ds1",namespace="ns1"} 2
 				kube_daemonset_status_updated_number_scheduled{daemonset="ds1",namespace="ns1"} 0
+				kube_daemonset_annotations{annotation_app_k8s_io_owner="@foo",daemonset="ds1",namespace="ns1"} 1
 				kube_daemonset_labels{daemonset="ds1",namespace="ns1"} 1
 			`,
 			MetricNames: []string{
+				"kube_daemonset_annotations",
 				"kube_daemonset_labels",
 				"kube_daemonset_metadata_generation",
 				"kube_daemonset_status_current_number_scheduled",
@@ -222,8 +233,8 @@ func TestDaemonSetStore(t *testing.T) {
 		},
 	}
 	for i, c := range cases {
-		c.Func = generator.ComposeMetricGenFuncs(daemonSetMetricFamilies(nil, nil))
-		c.Headers = generator.ExtractMetricFamilyHeaders(daemonSetMetricFamilies(nil, nil))
+		c.Func = generator.ComposeMetricGenFuncs(daemonSetMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
+		c.Headers = generator.ExtractMetricFamilyHeaders(daemonSetMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
 		if err := c.run(); err != nil {
 			t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
 		}
diff --git a/internal/store/horizontalpodautoscaler.go b/internal/store/horizontalpodautoscaler.go
index 3714ea9a35..44b1a5510f 100644
--- a/internal/store/horizontalpodautoscaler.go
+++ b/internal/store/horizontalpodautoscaler.go
@@ -199,7 +199,7 @@ func hpaMetricFamilies(allowAnnotationsList, allowLabelsList []string) []generat
 		metric.Gauge,
 		"",
 		wrapHPAFunc(func(a *autoscaling.HorizontalPodAutoscaler) *metric.Family {
-			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", a.Annotations, allowLabelsList)
+			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", a.Annotations, allowAnnotationsList)
 			return &metric.Family{
 				Metrics: []*metric.Metric{
 					{
diff --git a/internal/store/horizontalpodautoscaler_test.go b/internal/store/horizontalpodautoscaler_test.go
index 101d6a4af3..1ee64255b4 100644
--- a/internal/store/horizontalpodautoscaler_test.go
+++ b/internal/store/horizontalpodautoscaler_test.go
@@ -35,6 +35,7 @@ func TestHPAStore(t *testing.T) {
 	// Fixed metadata on type and help text. We prepend this to every expected
 	// output so we only have to modify a single place when doing adjustments.
 	const metadata = `
+		# HELP kube_horizontalpodautoscaler_annotations Kubernetes annotations converted to Prometheus labels.
 		# HELP kube_horizontalpodautoscaler_labels Kubernetes labels converted to Prometheus labels.
 		# HELP kube_horizontalpodautoscaler_metadata_generation The generation observed by the HorizontalPodAutoscaler controller.
 		# HELP kube_horizontalpodautoscaler_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
@@ -43,6 +44,7 @@ func TestHPAStore(t *testing.T) {
 		# HELP kube_horizontalpodautoscaler_status_condition The condition of this autoscaler.
 		# HELP kube_horizontalpodautoscaler_status_current_replicas Current number of replicas of pods managed by this autoscaler.
 		# HELP kube_horizontalpodautoscaler_status_desired_replicas Desired number of replicas of pods managed by this autoscaler.
+		# TYPE kube_horizontalpodautoscaler_annotations gauge
 		# TYPE kube_horizontalpodautoscaler_labels gauge
 		# TYPE kube_horizontalpodautoscaler_metadata_generation gauge
 		# TYPE kube_horizontalpodautoscaler_spec_max_replicas gauge
@@ -181,6 +183,7 @@ func TestHPAStore(t *testing.T) {
 				},
 			},
 			Want: metadata + `
+				kube_horizontalpodautoscaler_annotations{horizontalpodautoscaler="hpa1",namespace="ns1"} 1
 				kube_horizontalpodautoscaler_labels{horizontalpodautoscaler="hpa1",namespace="ns1"} 1
 				kube_horizontalpodautoscaler_metadata_generation{horizontalpodautoscaler="hpa1",namespace="ns1"} 2
 				kube_horizontalpodautoscaler_spec_max_replicas{horizontalpodautoscaler="hpa1",namespace="ns1"} 4
@@ -207,11 +210,15 @@ func TestHPAStore(t *testing.T) {
 				"kube_horizontalpodautoscaler_status_current_replicas",
 				"kube_horizontalpodautoscaler_status_desired_replicas",
 				"kube_horizontalpodautoscaler_status_condition",
+				"kube_horizontalpodautoscaler_annotations",
 				"kube_horizontalpodautoscaler_labels",
 			},
 		},
 		{
 			// Verify populating base metric.
+			AllowAnnotationsList: []string{
+				"app.k8s.io/owner",
+			},
 			Obj: &autoscaling.HorizontalPodAutoscaler{
 				ObjectMeta: metav1.ObjectMeta{
 					Generation: 2,
@@ -220,6 +227,10 @@ func TestHPAStore(t *testing.T) {
 					Labels: map[string]string{
 						"app": "foobar",
 					},
+					Annotations: map[string]string{
+						"app":              "mysql-server",
+						"app.k8s.io/owner": "@foo",
+					},
 				},
 				Spec: autoscaling.HorizontalPodAutoscalerSpec{
 					MaxReplicas: 4,
@@ -330,6 +341,7 @@ func TestHPAStore(t *testing.T) {
 				},
 			},
 			Want: metadata + `
+				kube_horizontalpodautoscaler_annotations{annotation_app_k8s_io_owner="@foo",horizontalpodautoscaler="hpa2",namespace="ns1"} 1
 				kube_horizontalpodautoscaler_labels{horizontalpodautoscaler="hpa2",namespace="ns1"} 1
 				kube_horizontalpodautoscaler_metadata_generation{horizontalpodautoscaler="hpa2",namespace="ns1"} 2
 				kube_horizontalpodautoscaler_spec_max_replicas{horizontalpodautoscaler="hpa2",namespace="ns1"} 4
@@ -352,13 +364,14 @@ func TestHPAStore(t *testing.T) {
 				"kube_horizontalpodautoscaler_status_current_replicas",
 				"kube_horizontalpodautoscaler_status_desired_replicas",
 				"kube_horizontalpodautoscaler_status_condition",
+				"kube_horizontalpodautoscaler_annotations",
 				"kube_horizontalpodautoscaler_labels",
 			},
 		},
 	}
 	for i, c := range cases {
-		c.Func = generator.ComposeMetricGenFuncs(hpaMetricFamilies(nil, nil))
-		c.Headers = generator.ExtractMetricFamilyHeaders(hpaMetricFamilies(nil, nil))
+		c.Func = generator.ComposeMetricGenFuncs(hpaMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
+		c.Headers = generator.ExtractMetricFamilyHeaders(hpaMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
 		if err := c.run(); err != nil {
 			t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
 		}
diff --git a/internal/store/ingress.go b/internal/store/ingress.go
index c05f16f7ce..0e1796e85a 100644
--- a/internal/store/ingress.go
+++ b/internal/store/ingress.go
@@ -61,7 +61,7 @@ func ingressMetricFamilies(allowAnnotationsList, allowLabelsList []string) []gen
 		metric.Gauge,
 		"",
 		wrapIngressFunc(func(i *networkingv1.Ingress) *metric.Family {
-			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", i.Annotations, allowLabelsList)
+			annotationKeys, annotationValues := createPrometheusLabelKeysValues("annotation", i.Annotations, allowAnnotationsList)
 			return &metric.Family{
 				Metrics: []*metric.Metric{
 					{
diff --git a/internal/store/ingress_test.go b/internal/store/ingress_test.go
index e4871943ab..4e8e3d8f3c 100644
--- a/internal/store/ingress_test.go
+++ b/internal/store/ingress_test.go
@@ -47,19 +47,40 @@ func TestIngressStore(t *testing.T) {
 	`
 	cases := []generateMetricsTestCase{
 		{
+			AllowAnnotationsList: []string{
+				"app.k8s.io/owner",
+			},
 			Obj: &networkingv1.Ingress{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:            "ingress1",
 					Namespace:       "ns1",
 					ResourceVersion: "000000",
+					Annotations: map[string]string{
+						"app":              "mysql-server",
+						"app.k8s.io/owner": "@foo",
+					},
 				},
 			},
-			Want: metadata + `
+			Want: `
+				# HELP kube_ingress_info Information about ingress.
+				# HELP kube_ingress_annotations Kubernetes annotations converted to Prometheus labels.
+				# HELP kube_ingress_labels Kubernetes labels converted to Prometheus labels.
+				# HELP kube_ingress_metadata_resource_version Resource version representing a specific version of ingress.
+				# TYPE kube_ingress_info gauge
+				# TYPE kube_ingress_annotations gauge
+				# TYPE kube_ingress_labels gauge
+				# TYPE kube_ingress_metadata_resource_version gauge
 				kube_ingress_info{namespace="ns1",ingress="ingress1"} 1
 				kube_ingress_metadata_resource_version{namespace="ns1",ingress="ingress1"} 0
+				kube_ingress_annotations{annotation_app_k8s_io_owner="@foo",namespace="ns1",ingress="ingress1"} 1
 				kube_ingress_labels{namespace="ns1",ingress="ingress1"} 1
 			`,
-			MetricNames: []string{"kube_ingress_info", "kube_ingress_metadata_resource_version", "kube_ingress_created", "kube_ingress_labels", "kube_ingress_path", "kube_ingress_tls"},
+			MetricNames: []string{
+				"kube_ingress_info",
+				"kube_ingress_metadata_resource_version",
+				"kube_ingress_annotations",
+				"kube_ingress_labels",
+			},
 		},
 		{
 			Obj: &networkingv1.Ingress{
@@ -169,8 +190,8 @@ func TestIngressStore(t *testing.T) {
 		},
 	}
 	for i, c := range cases {
-		c.Func = generator.ComposeMetricGenFuncs(ingressMetricFamilies(nil, nil))
-		c.Headers = generator.ExtractMetricFamilyHeaders(ingressMetricFamilies(nil, nil))
+		c.Func = generator.ComposeMetricGenFuncs(ingressMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
+		c.Headers = generator.ExtractMetricFamilyHeaders(ingressMetricFamilies(c.AllowAnnotationsList, c.AllowLabelsList))
 		if err := c.run(); err != nil {
 			t.Errorf("unexpected collecting result in %vth run:\n%s", i, err)
 		}
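
Reviewer note (not part of the patch, ignored by git am): a minimal, self-contained
sketch of why the wrong arguments silently dropped annotations.
createPrometheusLabelKeysValues is re-implemented here in a simplified, hypothetical
form — the real helper elsewhere in internal/store also handles wildcard allow lists
and the exact sanitization rules — but the prefix/allow-list behavior it illustrates
is what this patch fixes.

package main

import (
	"fmt"
	"regexp"
)

var invalidLabelChars = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// Simplified, hypothetical stand-in for createPrometheusLabelKeysValues:
// keep only allow-listed keys, then emit "<prefix>_<sanitized key>" label
// names alongside the raw values.
func createPrometheusLabelKeysValues(prefix string, kv map[string]string, allowList []string) ([]string, []string) {
	allowed := make(map[string]struct{}, len(allowList))
	for _, k := range allowList {
		allowed[k] = struct{}{}
	}
	var keys, values []string
	for k, v := range kv {
		if _, ok := allowed[k]; !ok {
			continue
		}
		keys = append(keys, prefix+"_"+invalidLabelChars.ReplaceAllString(k, "_"))
		values = append(values, v)
	}
	return keys, values
}

func main() {
	annotations := map[string]string{"app.k8s.io/owner": "@foo"}
	allowAnnotationsList := []string{"app.k8s.io/owner"} // e.g. via --metric-annotations-allowlist
	allowLabelsList := []string{}                        // nothing allow-listed for labels

	// Before the fix: the annotations map was filtered against the *labels*
	// allow list, so kube_cronjob_annotations never gained any labels.
	keys, _ := createPrometheusLabelKeysValues("label", annotations, allowLabelsList)
	fmt.Println(keys) // []

	// After the fix: correct prefix and correct allow list.
	keys, values := createPrometheusLabelKeysValues("annotation", annotations, allowAnnotationsList)
	fmt.Println(keys, values) // [annotation_app_k8s_io_owner] [@foo]
}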