fix(blooms): Fix check for skipping most recent data when filtering blooms (#15300)


After changing the default of `-bloom-build.planner.min-table-offset` from `1` (yesterday) to `0` (today), we noticed that the bloom gateways reported a very high percentage of missing series in blocks.

This was because the bloom gateways received filter requests for today's blocks, but the requested series were too recent and had not been added to any bloom block yet.

The bloom querier on the index gateway has a condition under which requests to the bloom gateway are skipped because the blooms are likely not available yet.

This PR changes that condition to correctly take both the min-table-offset and the planning interval into account.
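
A minimal, self-contained sketch of the before/after behaviour (the helper names and the 15m interval below are illustrative, not taken from the codebase): with the old check, `min-table-offset = 0` disabled the skip entirely, so today's bloom-less data was still sent to the gateway; the fixed check always exempts the most recent `min-table-offset` days plus twice the planning interval.

```go
package main

import (
	"fmt"
	"time"
)

const indexTablePeriod = 24 * time.Hour // each index table covers one day of data

// oldCutoff mirrors the previous check: with an offset of 0 there was no
// cutoff at all, so every request went to the bloom gateway.
func oldCutoff(now time.Time, offset int) (time.Time, bool) {
	if offset <= 0 {
		return time.Time{}, false
	}
	startOfDay := now.Truncate(indexTablePeriod)
	return startOfDay.Add(-indexTablePeriod * time.Duration(offset-1)), true
}

// newCutoff mirrors the fixed check: the most recent `offset` days plus two
// planning intervals are always exempt from bloom filtering.
func newCutoff(now time.Time, offset int, interval time.Duration) time.Time {
	return now.Add(-1 * (indexTablePeriod*time.Duration(offset) + 2*interval))
}

func main() {
	now := time.Date(2024, 12, 6, 12, 0, 0, 0, time.UTC)
	interval := 15 * time.Minute // illustrative planning interval, not a Loki default

	if _, ok := oldCutoff(now, 0); !ok {
		fmt.Println("old check, offset=0: never skips, so bloom-less data reaches the gateway")
	}
	fmt.Println("new check, offset=0: skips data newer than", newCutoff(now, 0, interval)) // 11:30 UTC
}
```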

Signed-off-by: Christian Haudum <christian.haudum@gmail.com>
chaudum authored Dec 6, 2024
1 parent 079779e commit 78d3c44
Showing 2 changed files with 26 additions and 27 deletions.
50 changes: 24 additions & 26 deletions pkg/bloomgateway/querier.go
```diff
@@ -72,8 +72,8 @@ func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem st
 }
 
 type QuerierConfig struct {
-	// MinTableOffset is derived from the compactor's MinTableOffset
-	MinTableOffset int
+	BuildInterval    time.Duration
+	BuildTableOffset int
 }
 
 // BloomQuerier is a store-level abstraction on top of Client
@@ -119,30 +119,28 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
 	preFilterSeries := len(grouped)
 
 	// Do not attempt to filter chunks for which there are no blooms
-	if bq.cfg.MinTableOffset > 0 {
-		minAge := truncateDay(model.Now()).Add(-1 * config.ObjectStorageIndexRequiredPeriod * time.Duration(bq.cfg.MinTableOffset-1))
-		if through.After(minAge) {
-			level.Debug(logger).Log(
-				"msg", "skip too recent chunks",
-				"tenant", tenant,
-				"from", from.Time(),
-				"through", through.Time(),
-				"responses", 0,
-				"preFilterChunks", preFilterChunks,
-				"postFilterChunks", preFilterChunks,
-				"filteredChunks", 0,
-				"preFilterSeries", preFilterSeries,
-				"postFilterSeries", preFilterSeries,
-				"filteredSeries", 0,
-			)
-
-			bq.metrics.chunksTotal.Add(float64(preFilterChunks))
-			bq.metrics.chunksFiltered.Add(0)
-			bq.metrics.seriesTotal.Add(float64(preFilterSeries))
-			bq.metrics.seriesFiltered.Add(0)
-
-			return chunkRefs, false, nil
-		}
+	minAge := model.Now().Add(-1 * (config.ObjectStorageIndexRequiredPeriod*time.Duration(bq.cfg.BuildTableOffset) + 2*bq.cfg.BuildInterval))
+	if through.After(minAge) {
+		level.Info(logger).Log(
+			"msg", "skip too recent chunks",
+			"tenant", tenant,
+			"from", from.Time(),
+			"through", through.Time(),
+			"responses", 0,
+			"preFilterChunks", preFilterChunks,
+			"postFilterChunks", preFilterChunks,
+			"filteredChunks", 0,
+			"preFilterSeries", preFilterSeries,
+			"postFilterSeries", preFilterSeries,
+			"filteredSeries", 0,
+		)
+
+		bq.metrics.chunksTotal.Add(float64(preFilterChunks))
+		bq.metrics.chunksFiltered.Add(0)
+		bq.metrics.seriesTotal.Add(float64(preFilterSeries))
+		bq.metrics.seriesFiltered.Add(0)
+
+		return chunkRefs, false, nil
 	}
 
 	var skippedGrps [][]*logproto.GroupedChunkRefs
```
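One detail when reading the hunk above: `After` is a strict comparison, so a chunk whose `through` timestamp equals the cutoff exactly is still sent to the bloom gateway; only strictly newer data takes the early return. A small sketch of that boundary (the production code compares Prometheus `model.Time` values; the standard library `time` package is used here purely for illustration):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2024, 12, 6, 12, 0, 0, 0, time.UTC)
	minAge := now.Add(-30 * time.Minute) // e.g. offset 0 with an assumed 15m planning interval

	atCutoff := minAge               // through == minAge
	newer := minAge.Add(time.Second) // through just inside the "too recent" window

	fmt.Println(atCutoff.After(minAge)) // false -> chunk is still filtered via the bloom gateway
	fmt.Println(newer.After(minAge))    // true  -> chunk is returned unfiltered ("skip too recent chunks")
}
```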
3 changes: 2 additions & 1 deletion pkg/loki/modules.go
```diff
@@ -1542,7 +1542,8 @@ func (t *Loki) initIndexGateway() (services.Service, error) {
 		}
 		resolver := bloomgateway.NewBlockResolver(t.BloomStore, logger)
 		querierCfg := bloomgateway.QuerierConfig{
-			MinTableOffset: t.Cfg.BloomBuild.Planner.MinTableOffset,
+			BuildTableOffset: t.Cfg.BloomBuild.Planner.MinTableOffset,
+			BuildInterval:    t.Cfg.BloomBuild.Planner.PlanningInterval,
 		}
 		bloomQuerier = bloomgateway.NewQuerier(bloomGatewayClient, querierCfg, t.Overrides, resolver, prometheus.DefaultRegisterer, logger)
 	}
```
