Merge remote-tracking branch 'origin/main' into merge-release-0.25-to-main

Signed-off-by: Matej Gera <matejgera@gmail.com>
matej-g committed Mar 24, 2022
2 parents add31fc + 149e026 commit 18ceb9b
Showing 56 changed files with 1,055 additions and 598 deletions.
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -8,6 +8,25 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan

We use *breaking :warning:* to mark changes that are not backward compatible (relates only to v0.y.z releases.)

## Unreleased

### Fixed

### Added

- [#5220](https://github.com/thanos-io/thanos/pull/5220) Query Frontend: Add `--query-frontend.forward-header` flag to forward headers to downstream queriers.

### Changed

- [#5205](https://github.com/thanos-io/thanos/pull/5205) Rule: Add ruler labels as external labels in stateless ruler mode.
- [#5206](https://github.com/thanos-io/thanos/pull/5206) Cache: add timeout for groupcache's fetch operation.
- [#5218](https://github.com/thanos-io/thanos/pull/5218) Tools: Run bucket downsample tools continuously.
- [#5224](https://github.com/thanos-io/thanos/pull/5224) Receive: Remove sort on label hashing
- [#5231](https://github.com/thanos-io/thanos/pull/5231) Tools: Bucket verify tool ignores blocks with deletion markers.
- [#5244](https://github.com/thanos-io/thanos/pull/5244) Query: Promote negative offset and `@` modifier to stable features as per Prometheus [#10121](https://github.com/prometheus/prometheus/pull/10121).

### Removed

## [v0.25.2](https://github.com/thanos-io/thanos/tree/release-0.25) - 2022.03.24

### Fixed
13 changes: 8 additions & 5 deletions Makefile
@@ -17,13 +17,16 @@ arch = $(shell uname -m)

# The include .busybox-versions includes the SHA's of all the platforms, which can be used as var.
ifeq ($(arch), x86_64)
# amd64
BASE_DOCKER_SHA=${amd64}
# amd64
BASE_DOCKER_SHA=${amd64}
else ifeq ($(arch), armv8)
# arm64
BASE_DOCKER_SHA=${arm64}
# arm64
BASE_DOCKER_SHA=${arm64}
else ifeq ($(arch), arm64)
# arm64
BASE_DOCKER_SHA=${arm64}
else
echo >&2 "only support amd64 or arm64 arch" && exit 1
echo >&2 "only support amd64 or arm64 arch" && exit 1
endif
DOCKER_ARCHS ?= amd64 arm64
# Generate two target: docker-xxx-amd64, docker-xxx-arm64.
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
0.25.2
0.26.0-dev
48 changes: 25 additions & 23 deletions cmd/thanos/downsample.go
@@ -69,6 +69,7 @@ func RunDownsample(
httpTLSConfig string,
httpGracePeriod time.Duration,
dataDir string,
waitInterval time.Duration,
downsampleConcurrency int,
objStoreConfig *extflag.PathOrContent,
comp component.Component,
@@ -113,31 +114,32 @@ func RunDownsample(
defer runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
statusProber.Ready()

level.Info(logger).Log("msg", "start first pass of downsampling")
metas, _, err := metaFetcher.Fetch(ctx)
if err != nil {
return errors.Wrap(err, "sync before first pass of downsampling")
}

for _, meta := range metas {
groupKey := meta.Thanos.GroupKey()
metrics.downsamples.WithLabelValues(groupKey)
metrics.downsampleFailures.WithLabelValues(groupKey)
}
if err := downsampleBucket(ctx, logger, metrics, bkt, metas, dataDir, downsampleConcurrency, hashFunc); err != nil {
return errors.Wrap(err, "downsampling failed")
}
return runutil.Repeat(waitInterval, ctx.Done(), func() error {
level.Info(logger).Log("msg", "start first pass of downsampling")
metas, _, err := metaFetcher.Fetch(ctx)
if err != nil {
return errors.Wrap(err, "sync before first pass of downsampling")
}

level.Info(logger).Log("msg", "start second pass of downsampling")
metas, _, err = metaFetcher.Fetch(ctx)
if err != nil {
return errors.Wrap(err, "sync before second pass of downsampling")
}
if err := downsampleBucket(ctx, logger, metrics, bkt, metas, dataDir, downsampleConcurrency, hashFunc); err != nil {
return errors.Wrap(err, "downsampling failed")
}
for _, meta := range metas {
groupKey := meta.Thanos.GroupKey()
metrics.downsamples.WithLabelValues(groupKey)
metrics.downsampleFailures.WithLabelValues(groupKey)
}
if err := downsampleBucket(ctx, logger, metrics, bkt, metas, dataDir, downsampleConcurrency, hashFunc); err != nil {
return errors.Wrap(err, "downsampling failed")
}

return nil
level.Info(logger).Log("msg", "start second pass of downsampling")
metas, _, err = metaFetcher.Fetch(ctx)
if err != nil {
return errors.Wrap(err, "sync before second pass of downsampling")
}
if err := downsampleBucket(ctx, logger, metrics, bkt, metas, dataDir, downsampleConcurrency, hashFunc); err != nil {
return errors.Wrap(err, "downsampling failed")
}
return nil
})
}, func(error) {
cancel()
})
22 changes: 9 additions & 13 deletions cmd/thanos/query.go
@@ -161,7 +161,7 @@ func registerQuery(app *extkingpin.App) {
enableMetricMetadataPartialResponse := cmd.Flag("metric-metadata.partial-response", "Enable partial response for metric metadata endpoint. --no-metric-metadata.partial-response for disabling.").
Hidden().Default("true").Bool()

featureList := cmd.Flag("enable-feature", "Comma separated experimental feature names to enable.The current list of features is "+promqlNegativeOffset+", "+promqlAtModifier+" and "+queryPushdown+".").Default("").Strings()
featureList := cmd.Flag("enable-feature", "Comma separated experimental feature names to enable.The current list of features is "+queryPushdown+".").Default("").Strings()

enableExemplarPartialResponse := cmd.Flag("exemplar.partial-response", "Enable partial response for exemplar endpoint. --no-exemplar.partial-response for disabling.").
Hidden().Default("true").Bool()
@@ -182,16 +182,16 @@ func registerQuery(app *extkingpin.App) {
return errors.Wrap(err, "parse federation labels")
}

var enableNegativeOffset, enableAtModifier, enableQueryPushdown bool
var enableQueryPushdown bool
for _, feature := range *featureList {
if feature == promqlNegativeOffset {
enableNegativeOffset = true
if feature == queryPushdown {
enableQueryPushdown = true
}
if feature == promqlAtModifier {
enableAtModifier = true
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", promqlAtModifier)
}
if feature == queryPushdown {
enableQueryPushdown = true
if feature == promqlNegativeOffset {
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", promqlNegativeOffset)
}
}

@@ -280,8 +280,6 @@ func registerQuery(app *extkingpin.App) {
*strictStores,
*strictEndpoints,
*webDisableCORS,
enableAtModifier,
enableNegativeOffset,
enableQueryPushdown,
*alertQueryURL,
component.Query,
@@ -349,8 +347,6 @@ func runQuery(
strictStores []string,
strictEndpoints []string,
disableCORS bool,
enableAtModifier bool,
enableNegativeOffset bool,
enableQueryPushdown bool,
alertQueryURL string,
comp component.Component,
@@ -480,8 +476,8 @@ func runQuery(
NoStepSubqueryIntervalFn: func(int64) int64 {
return defaultEvaluationInterval.Milliseconds()
},
EnableNegativeOffset: enableNegativeOffset,
EnableAtModifier: enableAtModifier,
EnableNegativeOffset: true,
EnableAtModifier: true,
}
)

2 changes: 2 additions & 0 deletions cmd/thanos/query_frontend.go
@@ -134,6 +134,8 @@ func registerQueryFrontend(app *extkingpin.App) {
"If multiple headers match the request, the first matching arg specified will take precedence. "+
"If no headers match 'anonymous' will be used.").PlaceHolder("<http-header-name>").StringsVar(&cfg.orgIdHeaders)

cmd.Flag("query-frontend.forward-header", "List of headers forwarded by the query-frontend to downstream queriers, default is empty").PlaceHolder("<http-header-name>").StringsVar(&cfg.ForwardHeaders)

cmd.Flag("log.request.decision", "Deprecation Warning - This flag would be soon deprecated, and replaced with `request.logging-config`. Request Logging for logging the start and end of requests. By default this flag is disabled. LogFinishCall : Logs the finish call of the requests. LogStartAndFinishCall : Logs the start and finish call of the requests. NoLogCall : Disable request logging.").Default("").EnumVar(&cfg.RequestLoggingDecision, "NoLogCall", "LogFinishCall", "LogStartAndFinishCall", "")
reqLogConfig := extkingpin.RegisterRequestLoggingFlags(cmd)

9 changes: 6 additions & 3 deletions cmd/thanos/rule.go
@@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
@@ -362,7 +363,9 @@ func runRule(
return 0, nil
}, conf.dataDir, 1*time.Minute, nil)
if err := remoteStore.ApplyConfig(&config.Config{
GlobalConfig: config.DefaultGlobalConfig,
GlobalConfig: config.GlobalConfig{
ExternalLabels: labelsTSDBToProm(conf.lset),
},
RemoteWriteConfigs: rwCfg.RemoteWriteConfigs,
}); err != nil {
return errors.Wrap(err, "applying config to remote storage")
@@ -463,13 +466,13 @@ func runRule(
{
// Run rule evaluation and alert notifications.
notifyFunc := func(ctx context.Context, expr string, alerts ...*rules.Alert) {
res := make([]*alert.Alert, 0, len(alerts))
res := make([]*notifier.Alert, 0, len(alerts))
for _, alrt := range alerts {
// Only send actually firing alerts.
if alrt.State == rules.StatePending {
continue
}
a := &alert.Alert{
a := &notifier.Alert{
StartsAt: alrt.FiredAt,
Labels: alrt.Labels,
Annotations: alrt.Annotations,
10 changes: 8 additions & 2 deletions cmd/thanos/tools_bucket.go
@@ -129,6 +129,7 @@ type bucketReplicateConfig struct {
}

type bucketDownsampleConfig struct {
waitInterval time.Duration
downsampleConcurrency int
dataDir string
hashFunc string
@@ -224,6 +225,8 @@ func (tbc *bucketRewriteConfig) registerBucketRewriteFlag(cmd extkingpin.FlagCla
}

func (tbc *bucketDownsampleConfig) registerBucketDownsampleFlag(cmd extkingpin.FlagClause) *bucketDownsampleConfig {
cmd.Flag("wait-interval", "Wait interval between downsample runs.").
Default("5m").DurationVar(&tbc.waitInterval)
cmd.Flag("downsample.concurrency", "Number of goroutines to use when downsampling blocks.").
Default("1").IntVar(&tbc.downsampleConcurrency)
cmd.Flag("data-dir", "Data directory in which to cache blocks and process downsamplings.").
@@ -330,7 +333,9 @@ func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.Path
return err
}

fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, bkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), nil)
// We ignore any block that has the deletion marker file.
filters := []block.MetadataFilter{block.NewIgnoreDeletionMarkFilter(logger, bkt, 0, block.FetcherConcurrency)}
fetcher, err := block.NewMetaFetcher(logger, block.FetcherConcurrency, bkt, "", extprom.WrapRegistererWithPrefix(extpromPrefix, reg), filters)
if err != nil {
return err
}
@@ -747,7 +752,8 @@ func registerBucketDownsample(app extkingpin.AppClause, objStoreConfig *extflag.
tbc.registerBucketDownsampleFlag(cmd)

cmd.Setup(func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ <-chan struct{}, _ bool) error {
return RunDownsample(g, logger, reg, *httpAddr, *httpTLSConfig, time.Duration(*httpGracePeriod), tbc.dataDir, tbc.downsampleConcurrency, objStoreConfig, component.Downsample, metadata.HashFunc(tbc.hashFunc))
return RunDownsample(g, logger, reg, *httpAddr, *httpTLSConfig, time.Duration(*httpGracePeriod), tbc.dataDir,
tbc.waitInterval, tbc.downsampleConcurrency, objStoreConfig, component.Downsample, metadata.HashFunc(tbc.hashFunc))
})
}

16 changes: 16 additions & 0 deletions docs/components/query-frontend.md
@@ -148,6 +148,19 @@ Keys which denote a duration are strings that can end with `s` or `m` to indicat

You can find the default values [here](https://github.com/thanos-io/thanos/blob/55cb8ca38b3539381dc6a781e637df15c694e50a/pkg/exthttp/transport.go#L12-L27).

## Forward Headers to Downstream Queriers

The `--query-frontend.forward-header` flag provides the list of request headers forwarded by the query-frontend to downstream queriers.

If the downstream queriers require basic authentication, query-frontend can be run as follows:

```bash
thanos query-frontend \
--http-address "0.0.0.0:9090" \
--query-frontend.forward-header "Authorization" \
--query-frontend.downstream-url="<thanos-querier>:<querier-http-port>"
```
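
With this configuration, credentials supplied by the client are passed through to the downstream querier. A request such as the following (hypothetical credentials and host) would then be authenticated end to end:

```bash
# The Authorization header produced by -u is forwarded to the downstream querier.
curl -u user:password "http://<query-frontend-host>:9090/api/v1/query?query=up"
```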

## Flags

```$ mdox-exec="thanos query-frontend --help"
@@ -233,6 +246,9 @@ Flags:
--query-frontend.downstream-url="http://localhost:9090"
URL of downstream Prometheus Query compatible
API.
--query-frontend.forward-header=<http-header-name> ...
List of headers forwarded by the query-frontend
to downstream queriers, default is empty
--query-frontend.log-queries-longer-than=0
Log queries that are slower than the specified
duration. Set to 0 to disable. Set to < 0 to
1 change: 0 additions & 1 deletion docs/components/query.md
@@ -260,7 +260,6 @@ Flags:
in all alerts 'Source' field.
--enable-feature= ... Comma separated experimental feature names to
enable.The current list of features is
promql-negative-offset, promql-at-modifier and
query-pushdown.
--endpoint=<endpoint> ... Addresses of statically configured Thanos API
servers (repeatable). The scheme may be
3 changes: 3 additions & 0 deletions docs/components/store.md
@@ -429,6 +429,7 @@ config:
- http://10.123.22.100:8080
groupcache_group: test_group
dns_interval: 1s
timeout: 2s
```

In this case, three Thanos Store nodes are running in the same group meaning that they all point to the same remote object storage.
@@ -441,6 +442,8 @@ In the `peers` section it is possible to use the prefix form to automatically lo

Note that there must be no trailing slash in the `peers` configuration, i.e. one of the strings must be identical to `self_url` and the others should have the same form. Without this, loading data from peers may fail.

If `timeout` is set to zero, there is no timeout for fetching and the fetch's lifetime is equal to that of the original request. It is recommended to keep this value above zero, since the fetch operation potentially includes loading data from remote object storage.
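
For reference, a complete caching configuration with an explicit fetch timeout would look roughly like the following sketch (addresses and group name are illustrative, taken from the example above):

```yaml
type: GROUPCACHE
config:
  self_url: http://10.123.22.100:8080
  peers:
    - http://10.123.22.100:8080
  groupcache_group: test_group
  dns_interval: 1s
  # Upper bound for a single fetch operation; keep above zero so that slow
  # loads from remote object storage cannot stall a request indefinitely.
  timeout: 2s
```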

## Index Header

In order to query series inside blocks from object storage, the Store Gateway has to know certain initial info from each block's index. To achieve this, on startup the Gateway builds an `index-header` for each block and stores it on local disk; such an `index-header` is built by downloading specific pieces of the original block's index, storing them on local disk, and then mmapping them for use by the Store Gateway.
1 change: 1 addition & 0 deletions docs/components/tools.md
@@ -652,6 +652,7 @@ Flags:
format details:
https://thanos.io/tip/thanos/tracing.md/#configuration
--version Show application version.
--wait-interval=5m Wait interval between downsample runs.
```
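
For example, to keep downsampling running as a long-lived process with a custom interval (a sketch; the data directory and bucket config file path are assumptions):

```bash
thanos tools bucket downsample \
  --data-dir=/var/thanos/downsample \
  --wait-interval=10m \
  --objstore.config-file=bucket.yml
```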
