From f8c00944a015188bc9bd67c2457ce398c277335c Mon Sep 17 00:00:00 2001
From: Ben Clive
Date: Wed, 3 Jul 2024 13:33:54 +0100
Subject: [PATCH] feat: exclude `/` in pattern tokens

Add `/` to the punctuation tokenizer's excluded delimiters so that file
paths, image references, and pod names are kept as single tokens. Drain
then collapses values that differ only in those tokens into a single
`<_>` placeholder, merging patterns that previously differed only in
path segments. All other changes are updated test expectations.

---
 pkg/pattern/drain/drain_test.go | 183 +++++++++++------------
 pkg/pattern/drain/line_tokenizer.go | 1 +
 pkg/pattern/drain/line_tokenizer_test.go | 2 +-
 3 files changed, 87 insertions(+), 99 deletions(-)

diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go
index f725439e89d3..1f964a0fef70 100644
--- a/pkg/pattern/drain/drain_test.go
+++ b/pkg/pattern/drain/drain_test.go
@@ -33,25 +33,25 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
 patterns: []string{
 `ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", batch_kubernetes_io_job_name=\"testcoordinator-job-2665838\", container=\"testcoordinator\", controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", job=\"k6-cloud/testcoordinator\", job_name=\"testcoordinator-job-2665838\", name=\"testcoordinator\", namespace=\"k6-cloud\", pod=\"testcoordinator-job-2665838-9g8ds\"}"`,
 `ts=2024-04-16T15:10:43.551543875Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/*.log:{container=\"marketplaces-api\", job=\"grafana-com/marketplaces-api\", name=\"marketplaces-api\", namespace=\"grafana-com\", pod=\"marketplaces-api-f67ff7567-gqrvb\", pod_template_hash=\"f67ff7567\"}"`,
- `ts=<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" path=/var/log/pods/*<_>/<_>/*.log`,
- `ts=<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=/var/log/pods/<_>/<_>`,
- `ts=<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=/var/log/pods/<_>/<_>`,
- `ts=<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_>/<_>/<_> op=CREATE`,
- `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*<_>/kube-proxy/*.log:{component=\"kube-proxy\", container=\"kube-proxy\", job=\"kube-system/<_>\", namespace=\"kube-system\", pod=\"kube-proxy-gke-ops-us-east-0-main-n2s32-1-1dd39c-32ae1dde-hmhw\", tier=\"node\"}"`,
- `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/<_>/*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
- `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*<_>/<_>/*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"<_>\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"<_>\", plan=\"free\", pod=\"<_>\", pod_template_hash=\"<_>\<_>`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file /var/log/pods/<_>/<_>/<_> ..."`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked /var/log/pods/<_>/<_>/0.log - &{Offset:0 Whence:0}"`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened /var/log/pods/<_>/<_>/<_>"`,
- `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for /var/log/pods/<_>/<_>/0.log to appear..."`,
+ `ts=<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" path=/var/log/pods/*<_>*.log`,
+ `ts=<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=<_>`,
+ `ts=<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=<_>`,
+ `ts=<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=<_> op=CREATE`,
+ `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*<_>*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`,
+ `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*<_>*.log:{component=\"kube-proxy\", container=\"kube-proxy\", job=\"<_>\", namespace=\"kube-system\", pod=\"kube-proxy-gke-ops-us-east-0-main-n2s32-1-1dd39c-32ae1dde-hmhw\", tier=\"node\"}"`,
+ `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*<_>*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"<_>\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"<_>\", plan=\"free\", pod=\"<_>\", pod_template_hash=\"<_>\", resource_version=\"<_>\", slug=<_>`,
+ `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file <_> ..."`,
+ `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked <_> - &{Offset:0 Whence:0}"`,
+ `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened <_>"`,
+ `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for <_> to appear..."`,
 `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner: token too long"`,
 `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1: unexpected '\"'"`,
- `ts=<_> caller=tailer.go:118 level=info component=logs logs_config=default component=tailer msg="position timer: exited" path=/var/log/pods/<_>/<_>/0.log`,
- `ts=<_> caller=tailer.go:147 level=info component=logs logs_config=default component=tailer msg="tail routine: started" path=/var/log/pods/<_>/<_>/0.log`,
- `ts=<_> caller=tailer.go:155 level=info component=logs logs_config=default component=tailer msg="tail routine: exited" path=/var/log/pods/<_>/<_>/0.log`, - `ts=<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine: tail channel closed, stopping tailer" path=/var/log/pods/<_>/<_>/0.log reason=null`, - `ts=<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" path=/var/log/pods/<_>/<_>/0.log`, - `ts=<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=/var/log/pods/<_>/<_>/0.log`, + `ts=<_> caller=tailer.go:118 level=info component=logs logs_config=default component=tailer msg="position timer: exited" path=<_>`, + `ts=<_> caller=tailer.go:147 level=info component=logs logs_config=default component=tailer msg="tail routine: started" path=<_>`, + `ts=<_> caller=tailer.go:155 level=info component=logs logs_config=default component=tailer msg="tail routine: exited" path=<_>`, + `ts=<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine: tail channel closed, stopping tailer" path=<_> reason=null`, + `ts=<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" path=<_>`, + `ts=<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=<_>`, `ts=<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=<_> duration=<_>`, }, }, @@ -62,7 +62,8 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { patterns: []string{ `ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg="GET /debug/pprof/delta_mutex (200) 1.161082ms"`, `ts=<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`, - `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>"`}, + `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>"`, + }, }, { drain: New(DefaultConfig(), nil), @@ -86,7 +87,8 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `ts=2024-05-02T12:17:22.242343806Z caller=http.go:194 level=debug traceID=404c6a83a18e66a4 orgID=75 msg="POST /ingest?aggregationType=average&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=goroutines&until=1714652242232506798 (200) 2.902485ms"`, `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=<_>%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%<_>%<_>%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%<_>%2Cpod_template_hash%<_>%2Cpyroscope_tenant%3Dpyroscope%2Ctier%<_>%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) <_>"`, `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_>&spyName=gospy&units=&until=1714652242232506798 (200) <_>"`, - `ts=<_> 
caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push (<_>) <_>"`}, + `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push (<_>) <_>"`, + }, }, { drain: New(DefaultConfig(), nil), @@ -112,21 +114,21 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `E0507 11:59:39.149450 4729 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cluster-agent pod=appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv_integration(69bc5e6c-0451-443e-af8a-c831871afbb8)\"" pod="integration/appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv" podUID="69bc5e6c-0451-443e-af8a-c831871afbb8"`, `E0507 <_> 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b"`, `E0507 <_> 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e"`, - `E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/<_>,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done;`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=agent pod=<_>(<_>)\"" pod="jaeger/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=<_>(<_>)\"" pod="faro/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" pod="faro/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=goldpinger pod=<_>(<_>)\"" pod="goldpinger/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off <_> restarting failed container=grafana pod=<_>(<_>)\"" pod="<_>/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/<_>\\\": us.gcr.io/hosted-grafana/<_> not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/<_>\\\": unexpected status from <_>`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/<_>\\\"\"" pod="hosted-grafana/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" pod="ge-metrics-federation/<_>" podUID="<_>"`, - `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent pod=<_>(<_>)\"" pod="support-agent/<_>" podUID="<_>"`, - `E0507 <_> <_> prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana"`, + `E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,<_>,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done;`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=agent pod=<_>(<_>)\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=<_>(<_>)\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=goldpinger pod=<_>(<_>)\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off <_> restarting failed container=grafana pod=<_>(<_>)\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"<_>\\\": failed to resolve reference \\\"<_>\\\": <_> not found, failed to pull and unpack image \\\"<_>\\\": failed to resolve reference \\\"<_>\\\": unexpected status from HEAD request to <_> 403 Forbidden]\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"<_>\\\"\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="<_>" podUID="<_>"`, + `E0507 <_> 
<_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent pod=<_>(<_>)\"" pod="<_>" podUID="<_>"`, + `E0507 <_> <_> prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" pod="<_>" podUID="<_>" containerName="grafana"`, `E0507 <_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=<_> actualBytes=10240`, - `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/<_>\": us.gcr.io/hosted-grafana/<_> not found" image="us.gcr.io/hosted-grafana/<_>"`, - `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_> 403 Forbidden" image="us.gcr.io/hosted-grafana/<_>"`, + `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"<_>\": failed to resolve reference \"<_>\": <_> not found" image="<_>"`, + `E0507 <_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"<_>\": failed to resolve reference \"<_>\": unexpected status from HEAD request to <_> 403 Forbidden" image="<_>"`, `E0507 <_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found" containerID="<_>"`, `E0507 <_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" containerID="<_>" cmd=["/bin/hgrun","check"]`, `I0507 11:59:31.815514 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hosted-grafana-pro) is not from ACR, return empty authentication`, @@ -134,30 +136,26 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `I0507 11:59:34.834734 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, `I0507 11:59:34.834794 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, `I0507 11:59:34.834835 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcs-serviceaccount\" (UniqueName: 
\"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, - `I0507 11:59:34.836955 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs" (OuterVolumeSpecName: "pdc-certs") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "pdc-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""`, - `I0507 11:59:34.841404 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t" (OuterVolumeSpecName: "kube-api-access-95j2t") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "kube-api-access-95j2t". PluginName "kubernetes.io/projected", VolumeGidValue ""`, `I0507 11:59:34.841447 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount" (OuterVolumeSpecName: "gcs-serviceaccount") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "gcs-serviceaccount". PluginName "kubernetes.io/secret", VolumeGidValue ""`, - `I0507 11:59:34.935951 3224 reconciler_common.go:300] "Volume detached for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, - `I0507 11:59:34.935988 3224 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, `I0507 11:59:34.936025 3224 reconciler_common.go:300] "Volume detached for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, - `I0507 11:59:38.092172 4527 kubelet.go:2426] "SyncLoop (PLEG): event for pod" pod="otel-demo/otel-demo-dev-checkoutservice-6ddf9b978b-zqrsr" event={"ID":"f263b787-926e-459a-95a0-f9ef8e4e9bc2","Type":"ContainerStarted","Data":"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9"}`, `I0507 11:59:38.116658 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hg-plugins) is not from ACR, return empty authentication`, `I0507 11:59:39.168633 2776 kubelet.go:2493] "SyncLoop (probe)" probe="readiness" status="" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q"`, `I0507 <_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`, + `I0507 <_> 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "<_>" (OuterVolumeSpecName: "<_>") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "<_>". 
PluginName "<_>", VolumeGidValue ""`, + `I0507 <_> 3224 reconciler_common.go:300] "Volume detached for volume \"<_>\" (UniqueName: \"<_>\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, `I0507 <_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`, - `I0507 <_> <_> <_>] "Cleaned up orphaned pod volumes dir" podUID="<_>" path="/var/lib/kubelet/pods/<_>/volumes"`, - `I0507 <_> <_> <_>] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/<_>" event={"ID":"<_>","Type":"<_>","Data":"<_>"}`, - `I0507 <_> <_> <_>] "SyncLoop DELETE" source="api" pods=["hosted-grafana/<_>"]`, - `I0507 <_> <_> <_>] "SyncLoop REMOVE" source="api" pods=["hosted-grafana/<_>"]`, + `I0507 <_> <_> <_>] "Cleaned up orphaned pod volumes dir" podUID="<_>" path="<_>"`, + `I0507 <_> <_> <_>] "SyncLoop (PLEG): event for pod" pod="<_>" event={"ID":"<_>","Type":"<_>","Data":"<_>"}`, + `I0507 <_> <_> <_>] "SyncLoop DELETE" source="api" pods=["<_>"]`, + `I0507 <_> <_> <_>] "SyncLoop REMOVE" source="api" pods=["<_>"]`, `I0507 <_> <_> generic.go:334] "Generic (PLEG): container finished" podID="<_>" containerID="<_>" exitCode=1`, - `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/<_>"`, - `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/<_>"`, - `I0507 <_> <_> kubelet_getters.go:187] "Pod status updated" pod="kube-system/<_>" status="Running"`, - `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="<_>/<_>" secret="" err="secret \"<_>\" not found"`, - `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="grafana-apps/<_>" secret="" err="secret \"dockerhub\" not found"`, + `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="<_>"`, + `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="<_>"`, + `I0507 <_> <_> kubelet_getters.go:187] "Pod status updated" pod="<_>" status="Running"`, + `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="<_>" secret="" err="secret \"<_>\" not found"`, `I0507 <_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"<_>"} err="failed to get container status \"<_>\": rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`, - `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="<_>/<_>" podUID="<_>" containerName="<_>" probeResult="failure" output="HTTP probe failed with statuscode: <_>"`, - `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana" probeResult="failure" output=<`, + `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="<_>" podUID="<_>" containerName="<_>" probeResult="failure" output="HTTP probe failed with statuscode: <_>"`, + `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="<_>" podUID="<_>" containerName="grafana" probeResult="failure" output=<`, `I0507 <_> <_> scope.go:117] "RemoveContainer" containerID="<_>"`, `I0507 <_> <_> cache.go:40] re-using cached key and certificate`, `IPv4: martian source <_> from <_>, on dev eth0`, @@ -189,26 +187,24 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `time="2024-05-07T11:59:43.954289531Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for &ContainerMetadata{Name:cortex-gw,Attempt:1660,} returns container id \"93fa5decd62691912f90c9b27526f5e00183239bfa4d3f4ea8578a7873b9c2b4\""`, `time="<_>" level=error msg="ContainerStatus for \"<_>\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`, `time="<_>" level=error msg="ExecSync for \"<_>\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`, - `time="<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/<_>\" failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_> 403 Forbidden"`, - `time="<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/<_>\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/<_>\": us.gcr.io/hosted-grafana/<_> not found"`, + `time="<_>" level=error msg="PullImage \"<_>\" failed" error="failed to pull and unpack image \"<_>\": failed to resolve reference \"<_>\": unexpected status from HEAD request to <_> 403 Forbidden"`, + `time="<_>" level=error msg="PullImage \"<_>\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"<_>\": failed to resolve reference \"<_>\": <_> not found"`, `time="<_>" level=info msg="CreateContainer within sandbox \"<_>\" for &ContainerMetadata{Name:grafana,<_>,} returns container id \"<_>\""`, `time="<_>" level=info msg="CreateContainer within sandbox \"<_>\" for &ContainerMetadata{Name:hgrun,Attempt:0,} returns container id \"<_>\""`, `time="<_>" level=info msg="CreateContainer within sandbox \"<_>\" for container &ContainerMetadata{Name:grafana,<_>,}"`, `time="<_>" level=info msg="CreateContainer within sandbox \"<_>\" for container &ContainerMetadata{Name:hgrun,Attempt:0,}"`, `time="<_>" level=info msg="ImageCreate event 
name:\"<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, - `time="<_>" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, `time="<_>" level=info msg="ImageUpdate event name:\"<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, - `time="<_>" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`, - `time="<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>\" returns image reference \"<_>\""`, - `time="<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>\""`, - `time="<_>" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/<_>\" with image id \"<_>\", repo tag \"us.gcr.io/hosted-grafana/<_>\", repo digest \"us.gcr.io/hosted-grafana/<_>@<_>\", size \"<_>\" in <_>"`, + `time="<_>" level=info msg="PullImage \"<_>\" returns image reference \"<_>\""`, + `time="<_>" level=info msg="PullImage \"<_>\""`, + `time="<_>" level=info msg="Pulled image \"<_>\" with image id \"<_>\", repo tag \"<_>\", repo digest \"<_>@<_>\", size \"<_>\" in <_>"`, `time="<_>" level=info msg="RemoveContainer for \"<_>\" returns successfully"`, `time="<_>" level=info msg="RemoveContainer for \"<_>\""`, `time="<_>" level=info msg="StartContainer for \"<_>\" returns successfully"`, `time="<_>" level=info msg="StartContainer for \"<_>\""`, `time="<_>" level=info msg="cleaning up dead shim" namespace=k8s.io`, `time="<_>" level=info msg="shim disconnected" id=<_> namespace=k8s.io`, - `time="<_>" level=info msg="stop pulling image us.gcr.io/hosted-grafana/<_> active requests=0, bytes read=<_>"`, + `time="<_>" level=info msg="stop pulling image <_> active requests=0, bytes read=<_>"`, `time="<_>" level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`, `time="<_>" level=warning msg="cleaning up after shim disconnected" id=<_> namespace=k8s.io`, `var-lib-containerd-tmpmounts-containerd\<_> Deactivated successfully.`, @@ -222,10 +218,10 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`, `[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`, `[2024-05-07 10:55:53,<_>] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.LocalLog$)`, - `[2024-05-07 <_>,<_>] INFO Deleted log /bitnami/kafka/data/<_>/<_> (kafka.log.LogSegment)`, - `[2024-05-07 <_>,<_>] INFO Deleted offset index 
/bitnami/kafka/data/<_>/<_> (kafka.log.LogSegment)`, - `[2024-05-07 <_>,<_>] INFO Deleted producer state snapshot /bitnami/kafka/data/<_>/<_> (kafka.log.SnapshotFile)`, - `[2024-05-07 <_>,<_>] INFO Deleted time index /bitnami/kafka/data/<_>/<_> (kafka.log.LogSegment)`, + `[2024-05-07 <_>,<_>] INFO Deleted log <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_>,<_>] INFO Deleted offset index <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_>,<_>] INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`, + `[2024-05-07 <_>,<_>] INFO Deleted time index <_> (kafka.log.LogSegment)`, `[2024-05-07 <_>,<_>] INFO [LocalLog partition=<_>, dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`, `[2024-05-07 <_>,<_>] INFO [ProducerStateManager partition=<_>] Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`, `[2024-05-07 <_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=<_>, size=<_>, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) due to retention size <_> breach. Log size after deletion will be <_> (kafka.log.UnifiedLog)`, @@ -241,50 +237,38 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { format: FormatUnknown, patterns: []string{ `I0507 12:02:27.947830 1 nodeutilization.go:274] "Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers"`, - `I0507 12:02:27.988834 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/netfilter-exporter-vsqft" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, `I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor`, `I0507 12:04:17.596431 1 nodeutilization.go:204] "Node is underutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} usagePercentage={"cpu":12.44,"memory":2.15,"pods":25}`, `I0507 12:04:17.596484 1 highnodeutilization.go:107] "Criteria for a node below target utilization" CPU=50 Mem=50 Pods=100`, `I0507 12:04:17.596504 1 highnodeutilization.go:108] "Number of underutilized nodes" totalNumber=1`, `I0507 12:04:17.596528 1 nodeutilization.go:260] "Total capacity to be moved" CPU=5060 Mem=112216292800 Pods=163`, `I0507 12:04:17.596651 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/metrics-server-v0.6.3-68f5b7c4d5-t5mz8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 12:04:17.596685 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/agent-lmlhl" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 12:04:17.596722 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/startup-sjjws" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, `I0507 12:04:17.596803 1 defaultevictor.go:202] "Pod fails the following checks" pod="gadget/gadget-zjjts" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 12:04:17.596827 1 defaultevictor.go:202] "Pod fails 
the following checks" pod="netfilter-exporter/netfilter-exporter-jkrhn" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, `I0507 <_> 1 <_>] "Evicting pods from node" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"}`, `I0507 <_> 1 <_>] "No removable pods on node, try next node" node="<_>"`, `I0507 <_> 1 <_>] "Number of evicted pods" totalEvicted=<_>`, `I0507 <_> 1 <_>] "Pods on node" node="<_>" allPods=<_> nonRemovablePods=<_> removablePods=<_>`, `I0507 <_> 1 <_>] "Total number of pods evicted" extension point="Balance" evictedPods=<_>`, - `I0507 <_> 1 <_>] k8s.io/client-go/informers/<_> Watch close - *<_> total <_> items received`, - `I0507 <_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="<_>/<_>"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>/<_>" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod 
has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/<_>" checks="pod is a DaemonSet pod"`, - `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_><_> <_> <_><_> <_> <_>]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>, insufficient <_>]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>, insufficient <_>]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>]"`, - `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="insufficient cpu"`, + `I0507 <_> 1 <_>] <_> Watch close - *<_> total <_> items received`, + `I0507 <_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="<_>"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, 
+ `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`, + `I0507 <_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>" checks="pod is a DaemonSet pod"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_><_> <_> <_><_> <_> <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>, insufficient <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>, insufficient <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>]"`, + `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>" node:="<_>" error:="insufficient cpu"`, `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="[insufficient <_>, insufficient <_>]"`, `I0507 <_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="pod node selector does not match the node label"`, - `I0507 <_> 1 node.go:339] "no Pod antiaffinity rule found" pod="<_>/<_>"`, + `I0507 <_> 1 node.go:339] "no Pod antiaffinity rule found" pod="<_>"`, `I0507 <_> 1 nodeutilization.go:207] "Node is overutilized" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"} usagePercentage={"cpu"<_>,"memory"<_>,"pods"<_>}`, }, }, @@ -293,8 +277,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { inputFile: "testdata/vault.txt", format: FormatUnknown, patterns: []string{ - `2024-05-07T10:56:38.667Z [INFO] expiration: revoked lease: lease_id=auth/gcp/login/h4c031a99aa555040a0dd99864d828e946c6d4e31f4f5178757183def61f9d104`, - `<_> [INFO] expiration: revoked lease: lease_id=auth/kubernetes/<_>/login/<_>`, + `<_> [INFO] expiration: revoked lease: lease_id=<_>`, }, }, { @@ -316,7 +299,9 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" 
ipVersion=0x4 tableIndex=1`, `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, `2024-05-08 15:23:56.624 [INFO][76] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 200ms: avg=10ms longest=10ms (resync-routes-v4,resync-routes-v4,resync-rules-v4,resync-wg)`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`, `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, `2024-05-08 15:23:58.169 [INFO][2333] felix/summary.go 100: Summarising 35 dataplane reconciliation loops over 1m2s: avg=12ms longest=46ms (resync-filter-v4,resync-filter-v6,resync-mangle-v4,resync-mangle-v6,update-filter-v4,update-filter-v6)`, `2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 957: Examining link for MTU calculation mtu=1500 name="eth0"`, @@ -346,15 +331,15 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `2024-05-08 <_> [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, `2024-05-08 <_> [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> <_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 915: Route is correct dest=<_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> <_> ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 915: Route is correct dest=<_> ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, `2024-05-08 <_> [DEBUG][<_>] felix/endpoint_mgr.go 443: Reporting endpoint status. 
dirtyEndpoints=set.Set{}`, `2024-05-08 <_> [DEBUG][<_>] felix/health.go 167: Health: <_>`, `2024-05-08 <_> [DEBUG][<_>] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{<_>, <_>, loc:(*time.Location)(0x4ce3aa0)}}`, `2024-05-08 <_> [DEBUG][<_>] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{<_>, <_>, loc:(*time.Location)(0x4ce3aa0)}}`, `2024-05-08 <_> [DEBUG][<_>] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{<_>, <_>, loc:(*time.Location)(0x4ce3aa0)}}`, `2024-05-08 <_> [DEBUG][<_>] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`, - `2024-05-08 <_> [DEBUG][<_>] felix/health.go <_> GET /<_>`, + `2024-05-08 <_> [DEBUG][<_>] felix/health.go <_> GET <_>`, `2024-05-08 <_> [DEBUG][<_>] felix/int_dataplane.go 1773: Refreshing IP sets state`, `2024-05-08 <_> [DEBUG][<_>] felix/int_dataplane.go 1807: Applying dataplane updates`, `2024-05-08 <_> [DEBUG][<_>] felix/int_dataplane.go 2080: Asked to reschedule. delay=<_>`, @@ -371,7 +356,10 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `2024-05-08 <_> [DEBUG][<_>] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, `2024-05-08 <_> [DEBUG][<_>] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, `2024-05-08 <_> [DEBUG][<_>] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_>, <_>, time.Local)}} type=""`, - `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="<_>"`, + `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter"`, + `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle"`, + `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat"`, + `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 table="raw"`, `2024-05-08 <_> [DEBUG][<_>] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="<_>"`, `2024-05-08 <_> [DEBUG][<_>] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, `2024-05-08 <_> [DEBUG][<_>] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`, @@ -382,7 +370,6 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { `2024-05-08 <_> [DEBUG][<_>] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`, `2024-05-08 <_> [DEBUG][<_>] felix/xdp_state.go 968: Processing member updates. family=4`, `2024-05-08 <_> [INFO][<_>] felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> avg=<_> longest=<_> (<_>)`, - `2024-05-08 <_> [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "<_><_><_> "<_><_> "<_><_> "<_><_> "<_><_> "<_><_> "<_><_> "<_><_> <_>", "", ""<_> <_><_><_> <_>"<_> <_><_><_><_><_> <_><_><_><_><_><_><_><_>", "", "", "", "", "", "", "", "", "", "", "", ""<_>`, `bird: Netlink: No route to host`, }, }, diff --git a/pkg/pattern/drain/line_tokenizer.go b/pkg/pattern/drain/line_tokenizer.go index f69b2a6ff286..075cd32c8ff0 100644 --- a/pkg/pattern/drain/line_tokenizer.go +++ b/pkg/pattern/drain/line_tokenizer.go @@ -33,6 +33,7 @@ func newPunctuationTokenizer() *punctuationTokenizer { excluded['-'] = 1 excluded['.'] = 1 excluded[':'] = 1 + excluded['/'] = 1 return &punctuationTokenizer{ includeDelimiters: included, excludeDelimiters: excluded, diff --git a/pkg/pattern/drain/line_tokenizer_test.go b/pkg/pattern/drain/line_tokenizer_test.go index 0223bdd11017..1193c62898bb 100644 --- a/pkg/pattern/drain/line_tokenizer_test.go +++ b/pkg/pattern/drain/line_tokenizer_test.go @@ -77,7 +77,7 @@ var testCases = []TestCase{ name: "longer line", line: "09:17:38.033366 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full 0 1717060658", want: map[string][]string{ - typePunctuation: {`09:17:38.033366`, `▶`, `INFO`, `route`, `ops`, `sending`, `to`, `dest`, `https:`, `/`, `/`, `graphite-cortex-ops-blocks-us-east4.grafana.net`, `/`, `graphite`, `/`, `metrics:`, `service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full`, `0`, `1717060658`}, + typePunctuation: {`09:17:38.033366`, `▶`, `INFO`, `route`, `ops`, `sending`, `to`, `dest`, `https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics:`, `service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full`, `0`, `1717060658`}, typeSplitting: {`09:`, `17:`, `38.033366`, `▶`, `INFO`, ``, `route`, `ops`, `sending`, `to`, `dest`, `https:`, `//graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics:`, ``, `service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full`, `0`, `1717060658`}, 
}, },
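
Note on the functional change: the only behavioural edit is in newPunctuationTokenizer (pkg/pattern/drain/line_tokenizer.go), which adds '/' to the excluded delimiters; the rest of the patch is updated test expectations. The program below is a minimal sketch of the effect, not the actual Loki tokenizer (which also tracks included delimiters and token positions, and the example log line is made up): once '/' no longer splits tokens, a whole file path survives as a single token.

package main

import (
	"fmt"
	"unicode"
)

// Excluded delimiters mirror newPunctuationTokenizer after this patch:
// '-', '.', ':' and now '/' no longer split tokens.
var excluded = map[rune]bool{'-': true, '.': true, ':': true, '/': true}

// tokenize splits on spaces; any other punctuation rune that is not
// excluded ends the current token and becomes a token of its own.
func tokenize(line string) []string {
	var tokens []string
	var cur []rune
	flush := func() {
		if len(cur) > 0 {
			tokens = append(tokens, string(cur))
			cur = cur[:0]
		}
	}
	for _, r := range line {
		switch {
		case r == ' ':
			flush()
		case unicode.IsPunct(r) && !excluded[r]:
			flush()
			tokens = append(tokens, string(r))
		default:
			cur = append(cur, r)
		}
	}
	flush()
	return tokens
}

func main() {
	// The whole path is one token (path chosen for illustration):
	fmt.Println(tokenize("msg=started path=/var/log/pods/abc/app/0.log"))
	// [msg=started path=/var/log/pods/abc/app/0.log]
	// Without '/' in excluded, the same call would yield
	// [msg=started path= / var / log / pods / abc / app / 0.log].
}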
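
Downstream, drain generalises a cluster by wildcarding token positions where its members disagree. A toy version of that update step (again a sketch of the idea rather than the real algorithm, with made-up Kafka segment file names) shows why the new expected patterns collapse whole paths into one <_> while the old ones kept the shared /bitnami/kafka/data prefix:

package main

import "fmt"

// merge generalises two equal-length token slices the way a drain-style
// cluster template is updated: positions that disagree become <_>.
func merge(a, b []string) []string {
	out := make([]string, len(a))
	for i := range a {
		if a[i] == b[i] {
			out[i] = a[i]
		} else {
			out[i] = "<_>"
		}
	}
	return out
}

func main() {
	// Token slices as the patched tokenizer would produce them: each full
	// path is a single token.
	a := []string{"INFO", "Deleted", "log", "/bitnami/kafka/data/ingest-6/001.log", "(", "kafka.log.LogSegment", ")"}
	b := []string{"INFO", "Deleted", "log", "/bitnami/kafka/data/ingest-7/002.log", "(", "kafka.log.LogSegment", ")"}
	fmt.Println(merge(a, b))
	// [INFO Deleted log <_> ( kafka.log.LogSegment )]
	// Pre-patch, '/' split each path into segments, so only the segments
	// that differed were wildcarded, giving /bitnami/kafka/data/<_>/<_>
	// (compare the old and new "INFO Deleted log" expectations above).
}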