From e2d9bc9e5146d8c1d34d129e88b0ffd59bca4d4d Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Thu, 31 Oct 2024 15:46:14 +0100
Subject: [PATCH 1/2] feat(blooms): Only write key and key=value to blooms

---
 pkg/storage/bloom/v1/tokenizer.go      | 1 -
 pkg/storage/bloom/v1/tokenizer_test.go | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/storage/bloom/v1/tokenizer.go b/pkg/storage/bloom/v1/tokenizer.go
index 59654747832d8..fc3d2c6b13b11 100644
--- a/pkg/storage/bloom/v1/tokenizer.go
+++ b/pkg/storage/bloom/v1/tokenizer.go
@@ -25,7 +25,6 @@ func (t *StructuredMetadataTokenizer) Tokens(kv push.LabelAdapter) iter.Iterator
 	combined := fmt.Sprintf("%s=%s", kv.Name, kv.Value)
 	t.tokens = append(t.tokens[:0],
 		kv.Name, t.prefix+kv.Name,
-		kv.Value, t.prefix+kv.Value,
 		combined, t.prefix+combined,
 	)
 	return iter.NewSliceIter(t.tokens)
diff --git a/pkg/storage/bloom/v1/tokenizer_test.go b/pkg/storage/bloom/v1/tokenizer_test.go
index 0aeb0ba1f5510..6fbb77021fb89 100644
--- a/pkg/storage/bloom/v1/tokenizer_test.go
+++ b/pkg/storage/bloom/v1/tokenizer_test.go
@@ -14,7 +14,7 @@ func TestStructuredMetadataTokenizer(t *testing.T) {
 	tokenizer := NewStructuredMetadataTokenizer("chunk")
 
 	metadata := push.LabelAdapter{Name: "pod", Value: "loki-1"}
-	expected := []string{"pod", "chunkpod", "loki-1", "chunkloki-1", "pod=loki-1", "chunkpod=loki-1"}
+	expected := []string{"pod", "chunkpod", "pod=loki-1", "chunkpod=loki-1"}
 
 	tokenIter := tokenizer.Tokens(metadata)
 	got, err := v2.Collect(tokenIter)

From 5ece6750d58579be5f8bfbcc2515b6088651b7a2 Mon Sep 17 00:00:00 2001
From: Salva Corts
Date: Thu, 31 Oct 2024 16:32:58 +0100
Subject: [PATCH 2/2] update docs

---
 docs/sources/operations/bloom-filters.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/sources/operations/bloom-filters.md b/docs/sources/operations/bloom-filters.md
index 63b0c4ecfaa6e..a62e67efbc900 100644
--- a/docs/sources/operations/bloom-filters.md
+++ b/docs/sources/operations/bloom-filters.md
@@ -176,10 +176,10 @@ If there are new TSDB files or any of them have changed, the planner will create
 The builder pulls a task from the planner's queue and processes the containing streams and chunks.
 For a given stream, the builder will iterate through all the log lines inside its new chunks and build a bloom for the stream.
 In case of changes for a previously processed TSDB file, builders will try to reuse blooms from existing blocks instead of building new ones from scratch.
 
-The builder converts structured metadata from each log line of each chunk of a stream and appends the hash of each key, value, and key-value pair to the bloom, followed by the hashes combined with the chunk identifier.
+The builder converts structured metadata from each log line of each chunk of a stream and appends the hash of each key and key-value pair to the bloom, followed by the hashes combined with the chunk identifier.
 The first set of hashes allows gateways to skip whole streams, while the latter is for skipping individual chunks.
 
-For example, given structured metadata `foo=bar` in the chunk `c6dj8g`, we append to the stream bloom the following hashes: `hash("foo")`, `hash("bar")`, `hash("foo=bar")`, `hash("c6dj8g" + "foo")`, `hash("c6dj8g" + "bar")`, and `hash("c6dj8g" + "foo=bar")`.
+For example, given structured metadata `foo=bar` in the chunk `c6dj8g`, we append to the stream bloom the following hashes: `hash("foo")`, `hash("foo=bar")`, `hash("c6dj8g" + "foo")`, and `hash("c6dj8g" + "foo=bar")`.
 
 ## Query sharding
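
For reference, here is a minimal standalone Go sketch of the token set the tokenizer emits after this change. The `tokens` helper below is hypothetical and written for illustration only; the real logic lives in `StructuredMetadataTokenizer.Tokens`, which reuses an internal slice and returns an iterator rather than a fresh slice.

```go
package main

import "fmt"

// tokens lists what gets appended to a stream bloom for one
// structured-metadata pair after this change: the key and the
// key=value combination, each written bare and with the chunk
// prefix. Plain values are no longer written.
func tokens(chunkPrefix, name, value string) []string {
	combined := fmt.Sprintf("%s=%s", name, value)
	return []string{
		name, chunkPrefix + name,
		combined, chunkPrefix + combined,
	}
}

func main() {
	// Matches the updated expectation in tokenizer_test.go.
	fmt.Println(tokens("chunk", "pod", "loki-1"))
	// Output: [pod chunkpod pod=loki-1 chunkpod=loki-1]
}
```

As the docs change above describes, the bare tokens let bloom gateways rule out whole streams, while the chunk-prefixed tokens rule out individual chunks.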