fixes to older panels (various variable substitutions have changed, metric names have changed, metrics are not enabled (yet), or 'prometheus' filters were not valid)

rh-pre-commit.version: 2.3.0
rh-pre-commit.check-secrets: ENABLED
gabemontero committed Jul 12, 2024
1 parent fa75ff9 commit 70b6019
Showing 1 changed file with 20 additions and 20 deletions.
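Two patterns recur throughout the diff below: queries that carried a prometheus="openshift-user-workload-monitoring/user-workload" label selector (no longer a valid filter) drop that selector, and queries that used Grafana's $__rate_interval variable switch to a fixed [10m] range; a couple of panels also move to metric names that are actually exported, e.g. pac_watcher_workqueue_unfinished_work_seconds_count in place of pac_watcher_workqueue_depth. As a rough sketch of the before/after shape only (selectors trimmed for brevity; the dashboard JSON in the diff is authoritative):

# before: 'prometheus' label filter and Grafana's $__rate_interval variable
tekton_pipelines_controller_running_pipelineruns_count{prometheus="openshift-user-workload-monitoring/user-workload"}
sum(rate(grpc_server_started_total{job="tekton-results-api"}[$__rate_interval]))

# after: plain metric selector and a fixed 10m rate window
tekton_pipelines_controller_running_pipelineruns_count
sum(rate(grpc_server_started_total{job="tekton-results-api"}[10m]))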
@@ -1013,7 +1013,7 @@
"targets": [
{
"exemplar": true,
"expr": "tekton_pipelines_controller_running_pipelineruns_count{prometheus=\"openshift-user-workload-monitoring/user-workload\"}",
"expr": "tekton_pipelines_controller_running_pipelineruns_count",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -1071,7 +1071,7 @@
"targets": [
{
"exemplar": true,
"expr": "tekton_pipelines_controller_running_taskruns_count{prometheus=\"openshift-user-workload-monitoring/user-workload\"}",
"expr": "tekton_pipelines_controller_running_taskruns_count",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -1407,7 +1407,7 @@
"targets": [
{
"exemplar": true,
"expr": "rate(tekton_pipelines_controller_client_results{prometheus=\"openshift-user-workload-monitoring/user-workload\"}[1h])*3600",
"expr": "rate(tekton_pipelines_controller_client_results[1h])*3600",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -2239,7 +2239,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum by (event_type) (rate(pac_watcher_pipelines_as_code_pipelinerun_count{prometheus=\"openshift-user-workload-monitoring/user-workload\"}[1h]))*3600",
"expr": "sum by (event_type) (rate(pac_watcher_pipelines_as_code_pipelinerun_count[1h]))*3600",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -2320,7 +2320,7 @@
"targets": [
{
"exemplar": true,
"expr": "avg(pac_watcher_workqueue_depth)",
"expr": "avg(pac_watcher_workqueue_unfinished_work_seconds_count)",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -2401,7 +2401,7 @@
"targets": [
{
"exemplar": true,
"expr": "rate(pac_watcher_client_results{prometheus=\"openshift-user-workload-monitoring/user-workload\"}[1h])*3600",
"expr": "rate(pac_watcher_client_results[1h])*3600",
"interval": "",
"legendFormat": "",
"refId": "A"
@@ -2960,7 +2960,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(grpc_server_started_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[$__rate_interval]))",
"expr": "sum(rate(grpc_server_started_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[10m]))",
"interval": "",
"intervalFactor": 4,
"legendFormat": "Request Rate",
@@ -3019,7 +3019,7 @@
"targets": [
{
"exemplar": true,
"expr": "histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[$__rate_interval])) by (le) )",
"expr": "histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[10m])) by (le) )",
"interval": "",
"intervalFactor": 4,
"legendFormat": "Latency",
@@ -3078,7 +3078,7 @@
"targets": [
{
"exemplar": true,
"expr": "histogram_quantile(0.99, sum(rate(watcher_reconcile_latency_bucket{job=\"tekton-results-watcher\"}[$__rate_interval])) by (le) )",
"expr": "histogram_quantile(0.99, sum(rate(watcher_reconcile_latency_bucket{job=\"tekton-results-watcher\"}[10m])) by (le) )",
"interval": "",
"intervalFactor": 4,
"legendFormat": "Latency",
@@ -3164,7 +3164,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(container_cpu_usage_seconds_total{pod=~\"tekton-results-api-.*\", namespace=\"tekton-results\", container=\"\"}[$__rate_interval]))",
"expr": "sum(rate(container_cpu_usage_seconds_total{pod=~\"tekton-results-api-.*\", namespace=\"tekton-results\", container=\"\"}[10m]))",
"instant": false,
"interval": "",
"intervalFactor": 4,
@@ -3173,7 +3173,7 @@
},
{
"exemplar": true,
"expr": "sum(rate(container_cpu_usage_seconds_total{pod=~\"tekton-results-watcher-.*\", namespace=\"tekton-results\", container=\"\"}[$__rate_interval]))",
"expr": "sum(rate(container_cpu_usage_seconds_total{pod=~\"tekton-results-watcher-.*\", namespace=\"tekton-results\", container=\"\"}[10m]))",
"hide": false,
"interval": "",
"intervalFactor": 4,
@@ -3362,7 +3362,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[$__rate_interval])) by (grpc_method)",
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[10m])) by (grpc_method)",
"interval": "",
"intervalFactor": 4,
"legendFormat": "{{grpc_method}}",
@@ -3459,7 +3459,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(increase(grpc_server_handling_seconds_bucket{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[$__rate_interval])) by (le)",
"expr": "sum(increase(grpc_server_handling_seconds_bucket{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\"}[10m])) by (le)",
"interval": "",
"intervalFactor": 4,
"legendFormat": "{{le}}",
@@ -3582,15 +3582,15 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[$__rate_interval])) by (grpc_method)",
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[10m])) by (grpc_method)",
"interval": "",
"intervalFactor": 4,
"legendFormat": "RPC: {{grpc_method}}",
"refId": "error methods"
},
{
"exemplar": true,
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code=~\"Internal|Unavailable|Unknown|Unimplemented\"}[$__rate_interval])) by (grpc_code) / ignoring(grpc_code) group_left sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[$__rate_interval]))",
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code=~\"Internal|Unavailable|Unknown|Unimplemented\"}[10m])) by (grpc_code) / ignoring(grpc_code) group_left sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[10m]))",
"hide": false,
"interval": "",
"intervalFactor": 4,
@@ -3700,15 +3700,15 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[$__rate_interval])) by (grpc_method)",
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[10m])) by (grpc_method)",
"interval": "",
"intervalFactor": 4,
"legendFormat": "RPC: {{grpc_method}}",
"refId": "error methods"
},
{
"exemplar": true,
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!~\"Internal|Unavailable|Unknown|Unimplemented\"}[$__rate_interval])) by (grpc_code) / ignoring(grpc_code) group_left sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[$__rate_interval]))",
"expr": "sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!~\"Internal|Unavailable|Unknown|Unimplemented\"}[10m])) by (grpc_code) / ignoring(grpc_code) group_left sum(rate(grpc_server_handled_total{job=\"tekton-results-api\", grpc_service=~\"tekton.results.v1alpha2.*\", grpc_code!=\"OK\"}[10m]))",
"hide": false,
"interval": "",
"intervalFactor": 4,
@@ -3826,7 +3826,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(watcher_reconcile_count{job=\"tekton-results-watcher\"}[$__rate_interval])) by (success)",
"expr": "sum(rate(watcher_reconcile_count{job=\"tekton-results-watcher\"}[10m])) by (success)",
"format": "time_series",
"interval": "",
"intervalFactor": 4,
@@ -3968,7 +3968,7 @@
"targets": [
{
"exemplar": true,
"expr": "sum(rate(watcher_workqueue_queue_latency_seconds_count{job=\"tekton-results-watcher\"}[$__rate_interval]))",
"expr": "sum(rate(watcher_workqueue_queue_latency_seconds_count{job=\"tekton-results-watcher\"}[10m]))",
"hide": false,
"interval": "",
"intervalFactor": 4,
@@ -3977,7 +3977,7 @@
},
{
"exemplar": true,
"expr": "sum(rate(watcher_reconcile_latency_count{job=\"tekton-results-watcher\"}[$__rate_interval]))",
"expr": "sum(rate(watcher_reconcile_latency_count{job=\"tekton-results-watcher\"}[10m]))",
"hide": false,
"interval": "",
"intervalFactor": 4,
