diff --git a/tests/tasks/generators/clusterloader/load-slos.yaml b/tests/tasks/generators/clusterloader/load-slos.yaml
index c04b84aa..d7c1d305 100644
--- a/tests/tasks/generators/clusterloader/load-slos.yaml
+++ b/tests/tasks/generators/clusterloader/load-slos.yaml
@@ -141,6 +141,11 @@ spec:
              effect: NoSchedule
           EOF
           cat $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/exporters/kube-state-metrics/deployment.yaml
+
+          # TODO: Remove this once we fix https://github.com/kubernetes/kubernetes/issues/126578 or find a better way to work around it.
+          rm $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/default/prometheus-serviceMonitorCoreDNS.yaml
+          rm $(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/default/prometheus-serviceMonitorLegacyKubeDNS.yaml
+
         fi
         # Building clusterloader2 binary
         cd $(workspaces.source.path)/perf-tests/clusterloader2/
@@ -162,10 +167,6 @@ spec:
         fi
         cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml
         cd $(workspaces.source.path)/perf-tests/clusterloader2/
-        # [ToDo] To temporarily stop bleeding we delete these.
-        # Related issue - https://github.com/kubernetes/kubernetes/issues/126578
-        kubectl --kubeconfig=$KUBECONFIG delete servicemonitor kube-dns -n monitoring
-        kubectl --kubeconfig=$KUBECONFIG delete servicemonitor coredns -n monitoring
         ENABLE_EXEC_SERVICE=false ./clusterloader --kubeconfig=$KUBECONFIG --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2
         exit_code=$?
         if [ $exit_code -eq 0 ]; then