From 58458c6724ac3aecc9a587a8130e516343369c7e Mon Sep 17 00:00:00 2001
From: Koenraad Verheyden
Date: Mon, 27 Sep 2021 09:10:50 +0200
Subject: [PATCH] Split up QueryTimeout for trace and search queries (#984)

* Split up QueryTimeout for trace and search queries

(cherry picked from commit 38a2aefd31ae4dc82105b2063fdf96f959cc3ca3)

* Rename TraceIDQueryTimeout -> TraceLookupQueryTimeout
---
 CHANGELOG.md              |  1 +
 modules/querier/config.go | 12 +++++++-----
 modules/querier/http.go   |  8 ++++----
 3 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0a15fd2b6ba..080a34754de 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@
 * [ENHANCEMENT] Add search block headers for wal blocks [#963](https://github.com/grafana/tempo/pull/963) (@mdisibio)
 * [ENHANCEMENT] Add support for vulture sending long running traces [#951](https://github.com/grafana/tempo/pull/951) (@zalegrala)
 * [ENHANCEMENT] Support global denylist and per-tenant allowlist of tags for search data. [#960](https://github.com/grafana/tempo/pull/960) (@annanay25)
+* [ENHANCEMENT] Add `search_query_timeout` to Querier config. [#984](https://github.com/grafana/tempo/pull/984) (@kvrhdn)
 * [BUGFIX] Update port spec for GCS docker-compose example [#869](https://github.com/grafana/tempo/pull/869) (@zalegrala)
 * [BUGFIX] Fix "magic number" errors and other block mishandling when an ingester forcefully shuts down [#937](https://github.com/grafana/tempo/issues/937) (@mdisibio)
 * [BUGFIX] Fix compactor memory leak [#806](https://github.com/grafana/tempo/pull/806) (@mdisibio)
diff --git a/modules/querier/config.go b/modules/querier/config.go
index e6d7ad43b40..61b84d1adfc 100644
--- a/modules/querier/config.go
+++ b/modules/querier/config.go
@@ -11,15 +11,17 @@ import (
 
 // Config for a querier.
 type Config struct {
-	QueryTimeout         time.Duration        `yaml:"query_timeout"`
-	ExtraQueryDelay      time.Duration        `yaml:"extra_query_delay,omitempty"`
-	MaxConcurrentQueries int                  `yaml:"max_concurrent_queries"`
-	Worker               cortex_worker.Config `yaml:"frontend_worker"`
+	TraceLookupQueryTimeout time.Duration        `yaml:"query_timeout"`
+	SearchQueryTimeout      time.Duration        `yaml:"search_query_timeout"`
+	ExtraQueryDelay         time.Duration        `yaml:"extra_query_delay,omitempty"`
+	MaxConcurrentQueries    int                  `yaml:"max_concurrent_queries"`
+	Worker                  cortex_worker.Config `yaml:"frontend_worker"`
 }
 
 // RegisterFlagsAndApplyDefaults register flags.
 func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
-	cfg.QueryTimeout = 10 * time.Second
+	cfg.TraceLookupQueryTimeout = 10 * time.Second
+	cfg.SearchQueryTimeout = 30 * time.Second
 	cfg.ExtraQueryDelay = 0
 	cfg.MaxConcurrentQueries = 5
 	cfg.Worker = cortex_worker.Config{
diff --git a/modules/querier/http.go b/modules/querier/http.go
index ba7792a2189..e7842fe3629 100644
--- a/modules/querier/http.go
+++ b/modules/querier/http.go
@@ -37,7 +37,7 @@ const (
 // TraceByIDHandler is a http.HandlerFunc to retrieve traces
 func (q *Querier) TraceByIDHandler(w http.ResponseWriter, r *http.Request) {
 	// Enforce the query timeout while querying backends
-	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.TraceLookupQueryTimeout))
 	defer cancel()
 
 	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.TraceByIDHandler")
@@ -150,7 +150,7 @@ func validateAndSanitizeRequest(r *http.Request) (string, string, string, error)
 
 func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) {
 	// Enforce the query timeout while querying backends
-	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.SearchQueryTimeout))
 	defer cancel()
 
 	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchHandler")
@@ -212,7 +212,7 @@ func (q *Querier) SearchHandler(w http.ResponseWriter, r *http.Request) {
 
 func (q *Querier) SearchTagsHandler(w http.ResponseWriter, r *http.Request) {
 	// Enforce the query timeout while querying backends
-	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.SearchQueryTimeout))
 	defer cancel()
 
 	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagsHandler")
@@ -236,7 +236,7 @@ func (q *Querier) SearchTagsHandler(w http.ResponseWriter, r *http.Request) {
 
 func (q *Querier) SearchTagValuesHandler(w http.ResponseWriter, r *http.Request) {
 	// Enforce the query timeout while querying backends
-	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.QueryTimeout))
+	ctx, cancel := context.WithDeadline(r.Context(), time.Now().Add(q.cfg.SearchQueryTimeout))
 	defer cancel()
 
 	span, ctx := opentracing.StartSpanFromContext(ctx, "Querier.SearchTagValuesHandler")
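
Usage sketch for reviewers: after this change the trace-by-ID path and the search/tag paths are bounded by separate timeouts. The sketch below is illustrative rather than part of the diff; it assumes Tempo's standard top-level `querier:` YAML block, keeps the unchanged `query_timeout` key for trace lookups, and uses the defaults set in RegisterFlagsAndApplyDefaults:

    querier:
      # bounds TraceByIDHandler (trace lookup by ID); yaml key is unchanged
      query_timeout: 10s
      # bounds SearchHandler, SearchTagsHandler and SearchTagValuesHandler
      search_query_timeout: 30s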