fix(blooms): Do not fail requests when fetching metas from cache fails (#12838)

The bloom shipper uses metas to resolve available blocks. Metas are fetched from the cache and, if not available there, from object storage.
If fetching metas from the cache fails, e.g. due to a timeout, the request should not fail but instead proceed as if no metas were available.

Signed-off-by: Christian Haudum <christian.haudum@gmail.com>
chaudum authored Apr 30, 2024
1 parent d751134 commit 667076d
Showing 3 changed files with 30 additions and 2 deletions.
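
To make the intended behavior concrete, here is a minimal, self-contained Go sketch of the fallback described in the commit message. It is not part of the diff below, and the names (metasCache, failingCache, fetchMetaKeys) are hypothetical stand-ins for the real bloomshipper types: on a cache error the lookup logs the failure and treats every key as a miss, so resolution can continue against object storage.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

// metasCache is a stand-in for the cache client interface used by the fetcher (hypothetical).
type metasCache interface {
	Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missing []string, err error)
}

// failingCache always errors, simulating e.g. a cache timeout.
type failingCache struct{}

func (failingCache) Fetch(context.Context, []string) ([]string, [][]byte, []string, error) {
	return nil, nil, nil, errors.New("timeout")
}

// fetchMetaKeys returns the keys resolved from cache plus the keys that still
// need to be loaded from object storage. A cache failure degrades to
// "nothing found, everything missing" instead of failing the request.
func fetchMetaKeys(ctx context.Context, c metasCache, keys []string) (hits, missing []string) {
	hits, _, missing, err := c.Fetch(ctx, keys)
	if err != nil {
		log.Printf("failed to fetch metas from cache, falling back to storage: %v", err)
		return nil, keys // treat all keys as cache misses
	}
	return hits, missing
}

func main() {
	hits, missing := fetchMetaKeys(context.Background(), failingCache{}, []string{"meta-1", "meta-2"})
	fmt.Println(hits, missing) // prints: [] [meta-1 meta-2]
}

The actual change in pkg/storage/stores/shipper/bloomshipper/fetcher.go below applies the same idea inside Fetcher.FetchMetas.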
17 changes: 16 additions & 1 deletion pkg/storage/chunk/cache/mock.go
@@ -13,16 +13,27 @@ type MockCache interface {
 	GetInternal() map[string][]byte
 	KeysRequested() int
 	GetKeys() []string
+	SetErr(error, error)
 }

 type mockCache struct {
 	numKeyUpdates int
 	keysRequested int
 	sync.Mutex
-	cache map[string][]byte
+	cache    map[string][]byte
+	storeErr error // optional error that is returned when calling Store()
+	fetchErr error // optional error that is returned when calling Fetch()
 }

+func (m *mockCache) SetErr(storeErr, fetchErr error) {
+	m.storeErr, m.fetchErr = storeErr, fetchErr
+}
+
 func (m *mockCache) Store(_ context.Context, keys []string, bufs [][]byte) error {
+	if m.storeErr != nil {
+		return m.storeErr
+	}
+
 	m.Lock()
 	defer m.Unlock()
 	for i := range keys {
@@ -33,6 +44,10 @@ func (m *mockCache) Store(_ context.Context, keys []string, bufs [][]byte) error
 }

 func (m *mockCache) Fetch(_ context.Context, keys []string) (found []string, bufs [][]byte, missing []string, err error) {
+	if m.fetchErr != nil {
+		return nil, nil, nil, m.fetchErr
+	}
+
 	m.Lock()
 	defer m.Unlock()
 	for _, key := range keys {
3 changes: 2 additions & 1 deletion pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -129,7 +129,8 @@ func (f *Fetcher) FetchMetas(ctx context.Context, refs []MetaRef) ([]Meta, error)
 	}
 	cacheHits, cacheBufs, _, err := f.metasCache.Fetch(ctx, keys)
 	if err != nil {
-		return nil, err
+		level.Error(f.logger).Log("msg", "failed to fetch metas from cache", "err", err)
+		return nil, nil
 	}

 	fromCache, missing, err := f.processMetasCacheResponse(ctx, refs, cacheHits, cacheBufs)
12 changes: 12 additions & 0 deletions pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -11,6 +11,7 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

@@ -72,6 +73,7 @@ func TestMetasFetcher(t *testing.T) {
 		start []Meta // initial cache state
 		end   []Meta // final cache state
 		fetch []Meta // metas to fetch
+		err   error  // error that is returned when calling cache.Fetch()
 	}{
 		{
 			name:  "all metas found in cache",
@@ -94,12 +96,22 @@
 			end:   makeMetas(t, schemaCfg, now, []v1.FingerprintBounds{{Min: 0x0000, Max: 0xffff}, {Min: 0x10000, Max: 0x1ffff}}),
 			fetch: makeMetas(t, schemaCfg, now, []v1.FingerprintBounds{{Min: 0x0000, Max: 0xffff}, {Min: 0x10000, Max: 0x1ffff}}),
 		},
+		{
+			name:  "error fetching metas yields empty result",
+			err:   errors.New("failed to fetch"),
+			store: makeMetas(t, schemaCfg, now, []v1.FingerprintBounds{{Min: 0x0000, Max: 0xffff}, {Min: 0x10000, Max: 0x1ffff}}),
+			start: makeMetas(t, schemaCfg, now, []v1.FingerprintBounds{{Min: 0x0000, Max: 0xffff}}),
+			end:   makeMetas(t, schemaCfg, now, []v1.FingerprintBounds{{Min: 0x0000, Max: 0xffff}}),
+			fetch: []Meta{},
+		},
 	}

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			ctx := context.Background()
 			metasCache := cache.NewMockCache()
+			metasCache.SetErr(nil, test.err)
+
 			cfg := bloomStoreConfig{workingDirs: []string{t.TempDir()}, numWorkers: 1}

 			oc, err := local.NewFSObjectClient(local.FSConfig{Directory: dir})
Expand Down
