diff --git a/.buildkite/hooks/pre-checkout b/.buildkite/hooks/pre-checkout
deleted file mode 100755
index c19519b3b6..0000000000
--- a/.buildkite/hooks/pre-checkout
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-set -eo pipefail
-
-echo "--- :git: cleaning checkout"
-chmod -R +w ./_tools
diff --git a/.buildkite/hooks/pre-exit b/.buildkite/hooks/pre-exit
index 497ac79f51..db05d33b76 100644
--- a/.buildkite/hooks/pre-exit
+++ b/.buildkite/hooks/pre-exit
@@ -3,5 +3,7 @@
set -eo pipefail
echo "--- :git: cleaning checkout"
-chmod -R +w ./_tools || true
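+# _tools may be absent (e.g. if the checkout failed before tools were
+# installed), so guard the chmod with an existence check rather than
+# masking all failures with `|| true`.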
+if [[ -d "./_tools" ]]; then
+ chmod -R +w ./_tools
+fi
git clean -dffx
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index c71d836da5..4d8569d0e8 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -14,7 +14,7 @@ steps:
command: make clean install-vendor-m3 test-all-gen
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.13.x
+ GIMME_GO_VERSION: 1.14.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
@@ -35,8 +35,17 @@ steps:
run: app
workdir: /go/src/github.com/m3db/m3
<<: *common
- - name: "Services, Tools, Metalint"
- command: make clean install-vendor-m3 services tools metalint
+ - name: "Services, Tools"
+ command: make clean install-vendor-m3 services tools
+ plugins:
+ docker-compose#v2.5.1:
+ run: app
+ workdir: /go/src/github.com/m3db/m3
+ <<: *common
+ - name: "Lint"
+ command: make clean lint
+ env:
+ CGO_ENABLED: 0
plugins:
docker-compose#v2.5.1:
run: app
@@ -47,7 +56,7 @@ steps:
parallelism: 2
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.13.x
+ GIMME_GO_VERSION: 1.14.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
@@ -57,7 +66,7 @@ steps:
parallelism: 1
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.13.x
+ GIMME_GO_VERSION: 1.14.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
@@ -90,7 +99,7 @@ steps:
command: make clean install-vendor-m3 docs-test
env:
CGO_ENABLED: 0
- GIMME_GO_VERSION: 1.13.x
+ GIMME_GO_VERSION: 1.14.x
plugins:
gopath-checkout#v1.0.1:
import: github.com/m3db/m3
diff --git a/.ci b/.ci
index 96907c2669..15209040a6 160000
--- a/.ci
+++ b/.ci
@@ -1 +1 @@
-Subproject commit 96907c2669187b166eead31d9e9a5bc4fcbb9b52
+Subproject commit 15209040a6432a3280c1e2ed2d55ebd520ebe723
diff --git a/.excludemetalint b/.excludemetalint
deleted file mode 100644
index 9cd47f901a..0000000000
--- a/.excludemetalint
+++ /dev/null
@@ -1,11 +0,0 @@
-.pb.go
-_gen.go
-_gen_test.go
-_mock.go
-_string.go
-generated/
-mocks/
-vendor/
-src/m3ninx/x/bytes/slice_arraypool_gen.go
-src/m3ninx/index/segment/mem/ids_map_gen.go
-src/query/parser/m3ql/grammar.peg.go
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000000..8ecd89e592
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,290 @@
+# options for analysis running
+run:
+  # default concurrency is the available CPU number
+ # concurrency: 4
+
+ # timeout for analysis, e.g. 30s, 5m, default is 1m
+ deadline: 10m
+
+ # exit code when at least one issue was found, default is 1
+ issues-exit-code: 1
+
+ # include test files or not, default is true
+ tests: true
+
+ # list of build tags, all linters use it. Default is empty list.
+ build-tags: []
+
+ # which dirs to skip: they won't be analyzed;
+ # can use regexp here: generated.*, regexp is applied on full path;
+ # default value is empty list, but next dirs are always skipped independently
+ # from this option's value:
+ # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+ skip-dirs:
+ - generated/.*
+ # There is some very weird golangci-lint bug that causes analysis to fail on the
+ # eventlog_server_test.go file (perhaps caused by https://github.com/golangci/golangci-lint/issues/995).
+ # To avoid sporadic CI failures we exclude the file from analysis for the time being.
+ # In order to exclude the file from analysis, and not just any lints in it, we need
+ # to put the file in a separate directory since although golangci-lint skips issues
+ # from files in the skip-files list, it still runs analysis on them.
+ - eventlog/test$
+
+ # which files to skip: they will be analyzed, but issues from them
+ # won't be reported. Default value is empty list, but there is
+  # no need to include all autogenerated files; we confidently recognize
+  # autogenerated files. If one is not recognized, please let us know.
+ skip-files: []
+
+ # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
+ # If invoked with -mod=readonly, the go command is disallowed from the implicit
+ # automatic updating of go.mod described above. Instead, it fails when any changes
+ # to go.mod are needed. This setting is most useful to check that go.mod does
+ # not need updates, such as in a continuous integration and testing system.
+ # If invoked with -mod=vendor, the go command assumes that the vendor
+ # directory holds the correct copies of dependencies and ignores
+ # the dependency descriptions in go.mod.
+ # modules-download-mode: readonly|release|vendor
+ modules-download-mode: readonly
+
+
+# output configuration options
+output:
+ # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
+ format: colored-line-number
+
+ # print lines of code with issue, default is true
+ print-issued-lines: true
+
+ # print linter name in the end of issue text, default is true
+ print-linter-name: true
+
+
+# all available settings of specific linters
+linters-settings:
+ govet:
+ # report about shadowed variables
+ check-shadowing: true
+ goimports:
+ # put imports beginning with prefix after 3rd-party packages;
+ # it's a comma-separated list of prefixes
+ local-prefixes: github.com/m3db/m3
+ gocyclo:
+ # minimal code complexity to report, 30 by default (but we recommend 10-20)
+ min-complexity: 10
+ maligned:
+ # print struct with more effective memory layout or not, false by default
+ suggest-new: true
+ dupl:
+ # tokens count to trigger issue, 150 by default
+ threshold: 100
+ goconst:
+ # minimal length of string constant, 3 by default
+ min-len: 3
+ # minimal occurrences count to trigger, 3 by default
+ min-occurrences: 3
+ depguard:
+ list-type: blacklist
+ include-go-root: false
+ packages:
+ - github.com/sirupsen/logrus
+ - github.com/golang/protobuf/jsonpb
+ - google.golang.org/protobuf/encoding/protojson
+ - github.com/golang/protobuf/proto
+ - google.golang.org/protobuf/proto
+ - github.com/tj/assert
+ packages-with-error-messages:
+ # specify an error message to output when a blacklisted package is used
+ github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
+ github.com/golang/protobuf/jsonpb: "replace with github.com/gogo/protobuf/jsonpb"
+ google.golang.org/protobuf/encoding/protojson: "replace with github.com/gogo/protobuf/jsonpb"
+ github.com/golang/protobuf/proto: "replace with github.com/gogo/protobuf/proto"
+ google.golang.org/protobuf/proto: "replace with github.com/gogo/protobuf/proto"
+ github.com/tj/assert: "use github.com/stretchr/testify/assert"
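      # Note (illustrative, hedged): depguard only reports blacklisted imports;
      # per the messages above, Go code is expected to import the gogo
      # equivalents instead, e.g.
      #   import "github.com/gogo/protobuf/proto"  // rather than golang/protobuf/proto
      #   import "github.com/gogo/protobuf/jsonpb" // rather than jsonpb/protojson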
+ misspell:
+ # Correct spellings using locale preferences for US or UK.
+ # Default is to use a neutral variety of English.
+ # Setting locale to US will correct the British spelling of 'colour' to 'color'.
+ locale: US
+ ignore-words:
+ - someword
+ lll:
+ # max line length, lines longer will be reported. Default is 120.
+ # '\t' is counted as 1 character by default, and can be changed with the tab-width option
+ line-length: 100
+ # tab width in spaces. Default to 1.
+ tab-width: 1
+ unused:
+ # treat code as a program (not a library) and report unused exported identifiers; default is false.
+ # XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
+  # if it's called for a subdir of a project it can't find func usages. All text editor integrations
+ # with golangci-lint call it on a directory with the changed file.
+ check-exported: false
+ unparam:
+ # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
+ # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
+  # if it's called for a subdir of a project it can't find external interfaces. All text editor integrations
+ # with golangci-lint call it on a directory with the changed file.
+ check-exported: false
+ nakedret:
+ # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
+ max-func-lines: 30
+ prealloc:
+ # XXX: we don't recommend using this linter before doing performance profiling.
+ # For most programs usage of prealloc will be a premature optimization.
+
+ # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
+ # True by default.
+ simple: true
+ range-loops: true # Report preallocation suggestions on range loops, true by default
+ for-loops: false # Report preallocation suggestions on for loops, false by default
+ gocritic:
+ # Which checks should be enabled; can't be combined with 'disabled-checks';
+ # See https://go-critic.github.io/overview#checks-overview
+ # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`
+ # By default list of stable checks is used.
+
+    # Enable multiple checks by tags; run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
+ # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
+ enabled-tags:
+ - performance
+ enabled-checks:
+ - ruleguard
+ settings: # settings passed to gocritic
+ captLocal: # must be valid enabled check name
+ paramsOnly: true
+ rangeValCopy:
+ sizeThreshold: 32
+ ruleguard:
+ rules: "src/cmd/tools/linter/gorules/rules.go"
+ gci:
+    # gci controls Go package import order and makes it deterministic
+ local-prefixes: github.com/m3db/m3
+
+linters:
+ enable:
+ - deadcode
+ - dogsled
+ - dupl
+ - errcheck
+ - exhaustive
+ - gci
+ - goconst
+ - gocritic
+ - gocyclo
+ - godox
+ - goimports
+ - golint
+ - gosimple
+ - govet
+ - ineffassign
+ - lll
+ - maligned
+ - megacheck
+ - misspell
+ - prealloc
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unconvert
+ - unparam
+ - varcheck
+ enable-all: false
+ disable:
+ - gomnd
+ - gochecknoinits
+ # Globals gonna global
+ - gochecknoglobals
+ # Overly harsh about long functions
+ - funlen
+ # Linter that checks that every comment ends in a period.
+ - godot
+ # Linter that makes you always use _test packages.
+ - testpackage
+ # Overly opinionated about how to construct errors
+ - goerr113
+ # Noisy warnings about whether "nolint" directives are necessary
+ - nolintlint
+ # Deprecated project due to being prone to bad suggestions.
+ - interfacer
+ # Valid use for not explicitly setting every field when they are optional nil/empty.
+ - exhaustivestruct
+ # We allow cuddling assignment following conditions because there are valid
+ # logical groupings for this use-case (e.g. when evaluating config values).
+ - wsl
+ # New line required before return would require a large fraction of the
+ # code base to need updating, it's not worth the perceived benefit.
+ - nlreturn
+ disable-all: false
+ presets:
+ # bodyclose, errcheck, gosec, govet, scopelint, staticcheck, typecheck
+ - bugs
+ # deadcode, ineffassign, structcheck, unparam, unused, varcheck
+ - unused
+ # gofmt, goimports
+ - format
+ # depguard, dupl, gochecknoglobals, gochecknoinits, goconst, gocritic,
+ # golint, gosimple, interfacer, lll, misspell, stylecheck, unconvert
+ - style
+ fast: false
+
+
+issues:
+ # List of regexps of issue texts to exclude, empty list by default.
+ # But independently from this option we use default exclude patterns,
+ # it can be disabled by `exclude-use-default: false`. To list all
+ # excluded by default patterns execute `golangci-lint run --help`
+ exclude:
+ # Exclude table-driven tests from scopelint (https://github.com/golangci/golangci-lint/issues/281).
+ - "Using the variable on range scope `tt` in function literal"
+ - "Using the variable on range scope `test` in function literal"
+    # It's common to shadow `err`, and doing so rarely indicates a problem. See
+ # https://github.com/golang/go/issues/19490 for further details.
+ - 'shadow: declaration of "err" shadows declaration'
+ # We commonly expose profiling information on /debug/pprof so we need to disable the gosec
+ # lint for it.
+ - "Profiling endpoint is automatically exposed on /debug/pprof"
+    # We only use md5 for non-cryptographic purposes (e.g. generating IDs where we don't assume
+    # the IDs are cryptographically secure).
+ - "Blacklisted import `crypto/md5`: weak cryptographic primitive"
+ # The logger is often our last option to communicate that an error occurred so if it returns
+ # an error we don't have an alternative to use. Since it's already unlikely that `Log` will
+ # return an error anyway we often skip checking the error for brevity.
+ - "Error return value of `\\(github.com\\/go-kit\\/kit\\/log.Logger\\).Log` is not checked"
+ # The caller is responsible for closing the Body of an `http.Response`. However, this step
+ # is usually performed in a defer function after the response has already been processed and
+ # so errors, which are already rare, can usually be safely ignored.
+ - "Error return value of `[a-zA-Z.]+.Body.Close` is not checked"
+ # The errcheck linter already checks for unhandled errors so we can disable the equivalent
+ # lint by gosec.
+ - "G104: Errors unhandled"
+
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ # Exclude lll issues for long lines with go:generate
+ - linters:
+ - lll
+ source: "^//go:generate "
+ # Exclude some linters from running on tests files.
+ # - path: _test\.go
+ # linters:
+ # - gocyclo
+ # - errcheck
+ # - dupl
+ # - gosec
+
+
+ # Independently from option `exclude` we use default exclude patterns,
+ # it can be disabled by this option. To list all
+ # excluded by default patterns execute `golangci-lint run --help`.
+ # Default value for this option is true.
+ exclude-use-default: false
+
+ # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+ max-issues-per-linter: 0
+
+ # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+ max-same-issues: 0
+
+ # Show only new issues created after git revision `REV`
+ new-from-rev: 5a3a12bba59e058b7a4cee6ec472c3f5786f741b
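The `gocritic` settings above enable the `ruleguard` check and point it at
`src/cmd/tools/linter/gorules/rules.go`. As a hedged sketch of what such a
rules file looks like (illustrative only: this is not the repository's actual
rules.go, and `timeSince` is a hypothetical rule), ruleguard rules are ordinary
Go functions written against the go-ruleguard DSL, which golangci-lint loads at
run time:

```go
// Package gorules holds ruleguard rules evaluated by gocritic.
// Illustrative sketch only; not the actual rules in this repository.
package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// timeSince flags time.Now().Sub(x), which reads better as time.Since(x).
func timeSince(m dsl.Matcher) {
	m.Match(`time.Now().Sub($x)`).
		Suggest(`time.Since($x)`).
		Report(`use time.Since instead of time.Now().Sub`)
}
```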
diff --git a/.metalinter.json b/.metalinter.json
deleted file mode 100644
index 55125b9232..0000000000
--- a/.metalinter.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "Linters": {
- "unused": {
- "Command": "unused -tags 'integration big'",
- "Pattern": "PATH:LINE:COL:MESSAGE"
- },
- "gosimple": {
- "Command": "gosimple -tags 'integration big'",
- "Pattern": "PATH:LINE:COL:MESSAGE"
- },
- "badtime": {
- "Command": "badtime -tags 'integration big'",
- "Pattern": "PATH:LINE:COL:MESSAGE"
- },
- "varcheck": {
- "Command": "varcheck -tags 'integration big'",
- "Pattern": "PATH:LINE:COL:MESSAGE"
- },
- "importorder": {
- "Command": "importorder -patterns='STDLIB github.com/m3db EXTERNAL' -tags 'integration big'",
- "Pattern": "PATH:LINE:MESSAGE"
- }
- },
- "Enable": [
- "varcheck",
- "structcheck",
- "goconst",
- "ineffassign",
- "unconvert",
- "misspell",
- "golint",
- "unused",
- "gosimple",
- "badtime",
- "maligned",
- "importorder"
- ],
- "Deadline": "3m",
- "EnableGC": true
-}
\ No newline at end of file
diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md
new file mode 100644
index 0000000000..8c8ba40971
--- /dev/null
+++ b/COMPATIBILITY.md
@@ -0,0 +1,25 @@
+# Compatibility
+
+This document describes versioning compatibility guarantees. Any compatibility
+outlined here will be maintained for at least the current major version.
+Additional guarantees may be added within the same major version, but existing
+guarantees may only be modified or removed as part of a major version change.
+
+If you discover a breaking change, please [open an issue][issue].
+
+[issue]: https://github.com/m3db/m3/issues/new
+
+## v1.0
+
+As of version 1.0, m3db/m3 and its components are guaranteed to have **binary**,
+**configuration**, **wire**, and **transport API** compatibility:
+
+| Compatibility | Scope | Guarantee |
+| :------------ | :---- | :-------- |
+| binary | All released executable components. | All executable components will maintain their current functionality. Components may have *additional* functionality added, but no existing functionality will be changed or removed. |
+| configuration | All configuration serialization for binary components. | All configuration will maintain serialization compatibility. New configuration properties may be added, but existing properties will not be renamed, removed, re-typed, or changed semantically or behaviorally. |
+| wire | All on-the-wire encoding for data to/from binary components. | All wire encoding (e.g. Protobuf, Thrift) will maintain wire compatibility. Any changes to IDLs or other encodings will be backwards-compatible. |
+| transport API | All exposed transport APIs for communication to/from binary components. | All transport APIs (e.g. m3msg, Thrift, HTTP+JSON) will maintain their current surface area, definitions, semantics, and behavior. Any changes to APIs will be additive or done semantically and with wire compatibility. |
+
+This version does **not** guarantee library-level compatibility. Users are
+discouraged from integrating libraries from m3db/m3 directly: use at your own risk.
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
new file mode 100644
index 0000000000..b3c7a5cc9f
--- /dev/null
+++ b/GOVERNANCE.md
@@ -0,0 +1,20 @@
+# Governance
+
+M3 is owned by Uber, and is governed by its Technical Steering Committee (the
+"TSC").
+
+The current members of the TSC are:
+
+- @jimjag (Uber)
+- @martin-mao (Chronosphere)
+- @mway (Uber)
+- @prateek (Uber)
+- @robskillington (Chronosphere)
+
+While the TSC aims to operate by consensus, any decision that requires a vote
+to move a proposal forward requires a majority vote of all TSC members.
+
+To reach the TSC directly, please email m3-tsc@uber.com. For administrative
+questions or issues, please contact ospo@uber.com. To add the TSC to issues or
+pull requests, add the [TSC label](https://github.com/m3db/m3/labels/TSC).
diff --git a/Makefile b/Makefile
index c737c4d4f8..92943d6670 100644
--- a/Makefile
+++ b/Makefile
@@ -20,9 +20,6 @@ tools_bin_path := $(abspath ./_tools/bin)
combined_bin_paths := $(tools_bin_path):$(gopath_bin_path)
retool_src_prefix := $(m3_package_path)/_tools/src
retool_package := github.com/twitchtv/retool
-metalint_check := .ci/metalint.sh
-metalint_config := .metalinter.json
-metalint_exclude := .excludemetalint
mocks_output_dir := generated/mocks
mocks_rules_dir := generated/mocks
proto_output_dir := generated/proto
@@ -41,11 +38,10 @@ GO_BUILD_LDFLAGS_CMD := $(abspath ./scripts/go-build-ldflags.sh)
GO_BUILD_LDFLAGS := $(shell $(GO_BUILD_LDFLAGS_CMD) LDFLAG)
GO_BUILD_COMMON_ENV := CGO_ENABLED=0
LINUX_AMD64_ENV := GOOS=linux GOARCH=amd64 $(GO_BUILD_COMMON_ENV)
-# GO_RELEASER_DOCKER_IMAGE is latest goreleaser for go 1.13
-GO_RELEASER_DOCKER_IMAGE := goreleaser/goreleaser:v0.127.0
+# GO_RELEASER_DOCKER_IMAGE is latest goreleaser for go 1.14
+GO_RELEASER_DOCKER_IMAGE := goreleaser/goreleaser:v0.141.0
GO_RELEASER_RELEASE_ARGS ?= --rm-dist
GO_RELEASER_WORKING_DIR := /go/src/github.com/m3db/m3
-GOMETALINT_VERSION := v2.0.5
# Retool will look for tools.json in the nearest parent git directory if not
# explicitly told the current dir. Allow setting the base dir so that tools can
@@ -93,6 +89,7 @@ TOOLS := \
verify_index_files \
carbon_load \
m3ctl \
+ linter \
.PHONY: setup
setup:
@@ -168,7 +165,7 @@ tools-linux-amd64:
$(LINUX_AMD64_ENV) make tools
.PHONY: all
-all: metalint test-ci-unit test-ci-integration services tools
+all: lint test-ci-unit test-ci-integration services tools
@echo Made all successfully
.PHONY: install-tools
@@ -177,10 +174,9 @@ install-tools:
GOBIN=$(tools_bin_path) go install github.com/fossas/fossa-cli/cmd/fossa
GOBIN=$(tools_bin_path) go install github.com/golang/mock/mockgen
GOBIN=$(tools_bin_path) go install github.com/google/go-jsonnet/cmd/jsonnet
- GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/linters/badtime
- GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/linters/importorder
GOBIN=$(tools_bin_path) go install github.com/m3db/build-tools/utilities/genclean
GOBIN=$(tools_bin_path) go install github.com/m3db/tools/update-license
+ GOBIN=$(tools_bin_path) go install github.com/golangci/golangci-lint/cmd/golangci-lint
GOBIN=$(tools_bin_path) go install github.com/mauricelam/genny
GOBIN=$(tools_bin_path) go install github.com/mjibson/esc
GOBIN=$(tools_bin_path) go install github.com/pointlander/peg
@@ -189,11 +185,6 @@ install-tools:
GOBIN=$(tools_bin_path) go install github.com/garethr/kubeval
GOBIN=$(tools_bin_path) go install github.com/wjdp/htmltest
-.PHONY: install-gometalinter
-install-gometalinter:
- @mkdir -p $(tools_bin_path)
- ./scripts/install-gometalinter.sh -b $(tools_bin_path) -d $(GOMETALINT_VERSION)
-
.PHONY: check-for-goreleaser-github-token
check-for-goreleaser-github-token:
ifndef GITHUB_TOKEN
@@ -265,8 +256,8 @@ SUBDIR_TARGETS := \
asset-gen \
genny-gen \
license-gen \
- all-gen \
- metalint
+ all-gen \
+ lint
.PHONY: test-ci-unit
test-ci-unit: test-base
@@ -374,11 +365,12 @@ test-ci-integration-$(SUBDIR):
@echo "--- uploading coverage report"
$(codecov_push) -f $(coverfile) -F $(SUBDIR)
-.PHONY: metalint-$(SUBDIR)
-metalint-$(SUBDIR): install-gometalinter install-linter-badtime install-linter-importorder
- @echo "--- metalinting $(SUBDIR)"
- @(PATH=$(combined_bin_paths):$(PATH) $(metalint_check) \
- $(metalint_config) $(metalint_exclude) src/$(SUBDIR))
+.PHONY: lint-$(SUBDIR)
+lint-$(SUBDIR): export GO_BUILD_TAGS = $(GO_BUILD_TAGS_LIST)
+lint-$(SUBDIR): install-tools linter
+ @echo "--- :golang: Running linters on $(SUBDIR)"
+ ./scripts/run-ci-lint.sh $(tools_bin_path)/golangci-lint ./src/$(SUBDIR)/...
+ ./bin/linter ./src/$(SUBDIR)/...
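+# For example, `make lint-dbnode` runs both linters against ./src/dbnode/... only.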
endef
@@ -392,9 +384,7 @@ endef
# generate targets across SUBDIRS for each SUBDIR_TARGET. i.e. generate rules
# which allow `make all-gen` to invoke `make all-gen-dbnode all-gen-coordinator ...`
-# NB: we skip metalint explicity as the default target below requires less invocations
-# of metalint and finishes faster.
-$(foreach SUBDIR_TARGET, $(filter-out metalint,$(SUBDIR_TARGETS)), $(eval $(SUBDIR_TARGET_RULE)))
+$(foreach SUBDIR_TARGET, $(SUBDIR_TARGETS), $(eval $(SUBDIR_TARGET_RULE)))
# Builds the single kube bundle from individual manifest files.
.PHONY: kube-gen-all
@@ -411,7 +401,7 @@ go-mod-tidy:
.PHONY: all-gen
all-gen: \
install-tools \
- $(foreach SUBDIR_TARGET, $(filter-out metalint all-gen,$(SUBDIR_TARGETS)), $(SUBDIR_TARGET)) \
+ $(foreach SUBDIR_TARGET, $(SUBDIR_TARGETS), $(SUBDIR_TARGET)) \
kube-gen-all \
go-mod-tidy
@@ -462,12 +452,6 @@ else
bash -c "source $(SELF_DIR)/.nvm/nvm.sh && nvm use 6 && $(node_cmd)"
endif
-.PHONY: metalint
-metalint: install-gometalinter install-tools
- @echo "--- metalinting src/"
- @(PATH=$(tools_bin_path):$(PATH) $(metalint_check) \
- $(metalint_config) $(metalint_exclude) $(m3_package_path)/src/)
-
# Tests that all currently generated types match their contents if they were regenerated
.PHONY: test-all-gen
test-all-gen: all-gen
diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md
new file mode 100644
index 0000000000..33352c7e13
--- /dev/null
+++ b/STYLEGUIDE.md
@@ -0,0 +1,168 @@
+# M3 Coding Styleguide
+
+M3's umbrella coding style guide is Uber's [Go Style Guide][uber-guide]. This
+document is maintained as a superset of that guide, capturing any substantive,
+intended departures from Uber's coding style. Where possible, code should follow
+these guidelines to ensure consistent style and coding practices across the
+codebase.
+
+Above all else, this guide is intended to be a point of reference rather than
+a sacred text. We maintain a style guide as a pragmatic tool that can be used to
+avoid common issues and normalize code across many authors and the Go community.
+
+New code should follow the style guide by default, preferring guidelines
+established here or in Uber's guide over any conflicting, pre-existing
+precedents in the M3 codebase. Ultimately, the hope is that the codebase
+incrementally moves closer to the style guide with each change.
+
+Since the M3 monorepo predates this style guide, reviewers should not expect
+contributors to make unrelated or unreasonable style-based changes as part of
+pull requests. However, when changing code that could reasonably be updated
+to follow the guide, we prefer that those changes adopt the guidelines to avoid
+sustaining or increasing technical debt. See DEVELOPMENT.md for more detail on
+changes involving style.
+
+[uber-guide]: https://github.com/uber-go/guide/blob/master/style.md
+
+## Linting
+
+Many guidelines are flagged by `go vet` or the other configured linters (see
+[.golangci.yml][.golangci.yml]). Wherever possible, we prefer to use tooling to
+enforce style and remove subjectivity or ambiguity. Linting is also a blocking
+build step for merging pull requests.
+
+[.golangci.yml]: https://github.com/m3db/m3/blob/master/.golangci.yml
+
+## Template
+
+When adding to this guide, use the following template:
+
+~~~
+### Short sentence about the guideline.
+
+Clearly (and succinctly) articulate the guideline and its rationale, including
+any problematic counter-examples. Be intentional when using language like
+"always", "never", etc, instead using words like "prefer" and "avoid" if the
+guideline isn't a hard rule. If it makes sense, also include example code:
+
+
+<table>
+<thead><tr><th>Bad</th><th>Good</th></tr></thead>
+<tbody>
+<tr><td>
+
+```go
+goodExample := false
+```
+
+</td><td>
+
+```go
+goodExample := true
+```
+
+</td></tr>
+<tr>
+<td>Description of bad code.</td>
+<td>Description of good code.</td>
+</tr>
+</tbody>
+</table>
+
+~~~
+
+## Guidelines
+
+### Export types carefully.
+
+Types should only be exported if they must be used by multiple packages. This
+applies also to adding new packages: a new package should only be added if it
+will be imported by multiple packages. If a given type or package will only
+initially be imported in one package, define those type(s) in that importing
+package instead.
+
+In general, it's harder to reduce surface area than it is to incrementally
+increase surface area, and the former is a breaking change while the latter is
+often not.
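+
+As a short illustrative sketch (the package and type names here are
+hypothetical, not taken from the M3 codebase), default to unexported:
+
+```go
+package fetcher
+
+// result stays unexported while this package is its only consumer. Exporting
+// it later, once a second package needs it, is an additive, non-breaking
+// change; the reverse is a breaking one.
+type result struct {
+	rows int
+}
+```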
+
+### Treat flaky tests like consistent failures.
+
+Flaky tests add noise to code health signals and reduce trust in tests being
+representative of code behavior. Worse, flaky tests can be either false
+positives or false negatives, making it especially unclear whether a given
+test passing or failing is good or bad. All of these reduce overall velocity
+and/or reliability.
+
+Any test discovered to be flaky should immediately result in either (a) the
+test being skipped because it is unreliable, or (b) master being frozen until
+the test is fixed and proven to no longer be flaky.
+
+### Do not expose experimental package types in non-experimental packages.
+
+A package is only able to guarantee a level of maturity/stability that is the
+lowest common denominator of all of its composing or transitively exported
+types. Given a hypothetical scenario:
+
+```go
+package foo
+
+type Bar struct {
+ Baz xfoo.Baz
+}
+```
+
+In this case, the stability of `foo.Bar` is purportedly guaranteed by package
+`foo` being non-experimental, but since it transitively exposes `xfoo.Baz` as
+part of `foo.Bar`, either (a) `xfoo.Baz` must implicitly adhere to versioning
+compatibility guarantees or (b) `foo` can no longer be considered stable,
+as any breaking change to `xfoo.Baz` will break `foo`.
+
+This is spiritually similar to the
+[Avoid Embedding Types In Public Structs][avoid-embedding-types] guidance, in
+that it bleeds implementation and compatibility details in an inappropriate way.
+
+This guidance also applies to any cases in which `internal` packages are used:
+any `internal` type is essentially the same as an unexported type, meaning
+that such a type is only implicitly available to users.
+
+[avoid-embedding-types]: https://github.com/uber-go/guide/blob/master/style.md#avoid-embedding-types-in-public-structs
+
+
+<table>
+<thead><tr><th>Bad</th><th>Good</th></tr></thead>
+<tbody>
+<tr><td>
+
+```go
+type NewConnectionFn func(
+    channelName string, addr string, opts Options,
+) (xclose.SimpleCloser, rpc.TChanNode, error)
+```
+
+</td><td>
+
+```go
+type NewConnectionFn func(
+    channelName string, addr string, opts Options,
+) (io.Closer, rpc.TChanNode, error)
+
+// or
+
+type SimpleCloser = func()
+
+type NewConnectionFn func(
+    channelName string, addr string, opts Options,
+) (SimpleCloser, rpc.TChanNode, error)
+```
+
+</td></tr>
+<tr>
+<td>
+`xclose.SimpleCloser` is part of `x/close`, an experimental package, but is
+directly exposed as part of `src/dbnode/client.NewConnectionFn`.
+</td>
+<td>
+The canonical `io.Closer` is used instead, or a type alias representing
+`xclose.SimpleCloser` is used. Both options prevent leaking experimental
+packages as part of non-experimental library APIs.
+</td>
+</tr>
+</tbody>
+</table>
diff --git a/config/m3db/clustered-etcd/generated.yaml b/config/m3db/clustered-etcd/generated.yaml
index f62e7ada30..435fc03f50 100644
--- a/config/m3db/clustered-etcd/generated.yaml
+++ b/config/m3db/clustered-etcd/generated.yaml
@@ -73,4 +73,3 @@
"sanitization": "prometheus"
"writeNewSeriesAsync": true
"writeNewSeriesBackoffDuration": "2ms"
- "writeNewSeriesLimitPerSecond": 1048576
diff --git a/config/m3db/clustered-etcd/m3dbnode.libsonnet b/config/m3db/clustered-etcd/m3dbnode.libsonnet
index 50bb6fa1ed..39de162dd5 100644
--- a/config/m3db/clustered-etcd/m3dbnode.libsonnet
+++ b/config/m3db/clustered-etcd/m3dbnode.libsonnet
@@ -101,7 +101,6 @@ function(cluster, coordinator={}, db={}) {
},
"gcPercentage": 100,
"writeNewSeriesAsync": true,
- "writeNewSeriesLimitPerSecond": 1048576,
"writeNewSeriesBackoffDuration": "2ms",
"cache": {
"series": {
diff --git a/config/m3db/local-etcd/generated.yaml b/config/m3db/local-etcd/generated.yaml
index cc5b10c10c..d307c410a9 100644
--- a/config/m3db/local-etcd/generated.yaml
+++ b/config/m3db/local-etcd/generated.yaml
@@ -68,4 +68,3 @@
"sanitization": "prometheus"
"writeNewSeriesAsync": true
"writeNewSeriesBackoffDuration": "2ms"
- "writeNewSeriesLimitPerSecond": 1048576
diff --git a/config/m3db/local-etcd/m3dbnode.libsonnet b/config/m3db/local-etcd/m3dbnode.libsonnet
index fc6e5c5651..575733ba98 100644
--- a/config/m3db/local-etcd/m3dbnode.libsonnet
+++ b/config/m3db/local-etcd/m3dbnode.libsonnet
@@ -60,7 +60,6 @@ function(coordinator={}, db={}) {
},
"gcPercentage": 100,
"writeNewSeriesAsync": true,
- "writeNewSeriesLimitPerSecond": 1048576,
"writeNewSeriesBackoffDuration": "2ms",
"cache": {
"series": {
diff --git a/docker-compose.yml b/docker-compose.yml
index 5097f20e75..36aa185329 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,5 @@
app:
- image: golang:1.13-stretch
+ image: golang:1.14-stretch
volumes:
- .:/go/src/github.com/m3db/m3
- /usr/bin/buildkite-agent:/usr/bin/buildkite-agent
diff --git a/docker/images.json b/docker/images.json
index 5a7feb1bb8..ecea5f3d2c 100644
--- a/docker/images.json
+++ b/docker/images.json
@@ -17,10 +17,6 @@
"name": "m3dbnode",
"tag_suffix": "setcap"
},
- "m3nsch": {
- "dockerfile": "docker/m3nsch/Dockerfile",
- "name": "m3nsch"
- },
"m3query": {
"dockerfile": "docker/m3query/Dockerfile",
"name": "m3query"
diff --git a/docker/m3aggregator/Dockerfile b/docker/m3aggregator/Dockerfile
index b637388c29..a423bc03ac 100644
--- a/docker/m3aggregator/Dockerfile
+++ b/docker/m3aggregator/Dockerfile
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/docker/m3collector/Dockerfile b/docker/m3collector/Dockerfile
index f0e55b9930..e19f4da382 100644
--- a/docker/m3collector/Dockerfile
+++ b/docker/m3collector/Dockerfile
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/docker/m3coordinator/Dockerfile b/docker/m3coordinator/Dockerfile
index 675183b68d..eba6c1bf7e 100644
--- a/docker/m3coordinator/Dockerfile
+++ b/docker/m3coordinator/Dockerfile
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/docker/m3dbnode/Dockerfile b/docker/m3dbnode/Dockerfile
index de6ca6e830..d85a4d7138 100644
--- a/docker/m3dbnode/Dockerfile
+++ b/docker/m3dbnode/Dockerfile
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/docker/m3dbnode/Dockerfile-setcap b/docker/m3dbnode/Dockerfile-setcap
index a2b7463d27..49d8bffafe 100644
--- a/docker/m3dbnode/Dockerfile-setcap
+++ b/docker/m3dbnode/Dockerfile-setcap
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/docker/m3query/Dockerfile b/docker/m3query/Dockerfile
index 58a6d8fe8a..bd548db079 100644
--- a/docker/m3query/Dockerfile
+++ b/docker/m3query/Dockerfile
@@ -1,5 +1,5 @@
# stage 1: build
-FROM golang:1.13-alpine3.11 AS builder
+FROM golang:1.14-alpine3.11 AS builder
LABEL maintainer="The M3DB Authors "
# Install deps
diff --git a/go.mod b/go.mod
index 91e3ee308a..8e46e640e9 100644
--- a/go.mod
+++ b/go.mod
@@ -24,6 +24,7 @@ require (
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
github.com/davecgh/go-spew v1.1.1
github.com/docker/go-connections v0.4.0 // indirect
+ github.com/fatih/color v1.10.0 // indirect
github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743
github.com/fossas/fossa-cli v1.0.30
github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94
@@ -34,9 +35,9 @@ require (
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/gogo/protobuf v1.3.1
github.com/golang/mock v1.4.4
- github.com/golang/protobuf v1.3.3
+ github.com/golang/protobuf v1.4.2
github.com/golang/snappy v0.0.1
- github.com/google/go-cmp v0.5.1
+ github.com/google/go-cmp v0.5.2
github.com/google/go-jsonnet v0.16.0
github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f // indirect
github.com/gorilla/handlers v1.4.2 // indirect
@@ -47,6 +48,7 @@ require (
github.com/influxdata/influxdb v1.7.7
github.com/jhump/protoreflect v1.6.1
github.com/json-iterator/go v1.1.9
+ github.com/kr/text v0.2.0 // indirect
github.com/leanovate/gopter v0.2.8
github.com/lib/pq v1.6.0 // indirect
github.com/lightstep/lightstep-tracer-go v0.18.1
@@ -65,6 +67,9 @@ require (
github.com/m3dbx/vellum v0.0.0-20200826162549-f94c029903de
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885
github.com/mjibson/esc v0.1.0
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+ github.com/onsi/ginkgo v1.14.1 // indirect
+ github.com/onsi/gomega v1.10.2 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9
@@ -90,8 +95,9 @@ require (
github.com/satori/go.uuid v1.2.0
github.com/sergi/go-diff v1.1.0
github.com/shirou/gopsutil v2.20.5+incompatible // indirect
+ github.com/sirupsen/logrus v1.7.0 // indirect
github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a // indirect
- github.com/spf13/cobra v0.0.5
+ github.com/spf13/cobra v1.1.1
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 // indirect
github.com/stretchr/testify v1.6.1
@@ -116,8 +122,10 @@ require (
go.uber.org/zap v1.13.0
golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
- golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a
+ golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634
+ golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752
google.golang.org/grpc v1.29.1
+ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/go-ini/ini.v1 v1.57.0 // indirect
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.7.0
@@ -125,8 +133,9 @@ require (
gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
gopkg.in/validator.v2 v2.0.0-20160201165114-3e4f037f12a1
gopkg.in/vmihailenco/msgpack.v2 v2.8.3
- gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v2 v2.3.0
gotest.tools v2.2.0+incompatible
+ honnef.co/go/tools v0.0.1-2020.1.6 // indirect
)
// branch 0.9.3-pool-read-binary-3
diff --git a/go.sum b/go.sum
index 027a861899..2971417542 100644
--- a/go.sum
+++ b/go.sum
@@ -160,7 +160,10 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -202,6 +205,8 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743 h1:QDM8xNoGxemDHdExynv+HzqkTPsFFZ8EyZdMwGElpGg=
@@ -212,6 +217,8 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94 h1:NMtO+FvLt7roVanhHmJUsIRq9sEbEytH/PWNE+zR8vw=
github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94/go.mod h1:L8VwozDBY4bGI25r29I6FURZus8xlVo/B7lNOSfre2g=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -311,6 +318,13 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -330,8 +344,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v30 v30.1.0 h1:VLDx+UolQICEOKu2m4uAoMti1SxuEBAl7RSEG16L+Oo=
github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8=
github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0=
@@ -500,6 +514,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/leanovate/gopter v0.2.8 h1:eFPtJ3aa5zLfbxGROSNY75T9Dume60CWBAqoWQ3h/ig=
github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
@@ -556,6 +572,8 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@@ -611,6 +629,10 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
@@ -623,12 +645,20 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
+github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
+github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -763,6 +793,8 @@ github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvH
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -788,6 +820,8 @@ github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -798,6 +832,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
@@ -868,6 +903,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
@@ -940,6 +976,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -972,6 +1010,7 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1036,12 +1075,17 @@ golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752 h1:2ntEwh02rqo2jSsrYmp4yKHHjh0CbXP3ZtSUetSB+q8=
+golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1088,12 +1132,21 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -1140,6 +1193,8 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@@ -1150,6 +1205,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc=
+honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY=
k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0=
k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ=
diff --git a/integrations/grafana/m3nsch_dashboard.json b/integrations/grafana/m3nsch_dashboard.json
deleted file mode 100644
index 32e48ac9e7..0000000000
--- a/integrations/grafana/m3nsch_dashboard.json
+++ /dev/null
@@ -1,499 +0,0 @@
-{
- "__inputs": [
- {
- "name": "DS_PROMETHEUS",
- "label": "prometheus",
- "description": "",
- "type": "datasource",
- "pluginId": "prometheus",
- "pluginName": "Prometheus"
- }
- ],
- "__requires": [
- {
- "type": "grafana",
- "id": "grafana",
- "name": "Grafana",
- "version": "5.2.4"
- },
- {
- "type": "panel",
- "id": "graph",
- "name": "Graph",
- "version": "5.0.0"
- },
- {
- "type": "datasource",
- "id": "prometheus",
- "name": "Prometheus",
- "version": "5.0.0"
- }
- ],
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "gnetId": null,
- "graphTooltip": 0,
- "id": null,
- "iteration": 1550848579805,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 2,
- "panels": [],
- "title": "Write Requests",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(agent_write_success{instance=~\"$instance\"}[$step])) by (instance)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "writes-success-{{instance}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Write Success / s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(agent_write_errors{instance=~\"$instance\"}[$step])) by (instance)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "writes-errors-{{instance}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Write Errors / s",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "agent_write_success_latency{instance=~\"$instance\",quantile=\"0.99\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "write-success-{{instance}}-p99",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Write Success P99",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "fill": 1,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 10
- },
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "agent_write_errors_latency{instance=~\"$instance\",quantile=\"0.99\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "write-errors-{{instance}}-p99",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Write Errors P99",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "schemaVersion": 16,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "allValue": null,
- "current": {},
- "datasource": "${DS_PROMETHEUS}",
- "hide": 0,
- "includeAll": true,
- "label": null,
- "multi": true,
- "name": "instance",
- "options": [],
- "query": "label_values(agent_write_success,instance)",
- "refresh": 1,
- "regex": "",
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "current": {
- "tags": [],
- "text": "1m",
- "value": "1m"
- },
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "step",
- "options": [
- {
- "selected": false,
- "text": "30s",
- "value": "30s"
- },
- {
- "selected": true,
- "text": "1m",
- "value": "1m"
- },
- {
- "selected": false,
- "text": "5m",
- "value": "5m"
- },
- {
- "selected": false,
- "text": "10m",
- "value": "10m"
- }
- ],
- "query": "30s,1m,5m,10m",
- "type": "custom"
- }
- ]
- },
- "time": {
- "from": "now-1h",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "M3nsch",
- "uid": "1gePBprmz",
- "version": 6
-}
diff --git a/kube/bundle.yaml b/kube/bundle.yaml
index 8e99d0b72e..bc2bd61b36 100644
--- a/kube/bundle.yaml
+++ b/kube/bundle.yaml
@@ -163,7 +163,6 @@ data:
gcPercentage: 100
writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
commitlog:
@@ -176,18 +175,19 @@ data:
filesystem:
filePathPrefix: /var/lib/m3db
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - http://etcd-0.etcd:2379
- - http://etcd-1.etcd:2379
- - http://etcd-2.etcd:2379
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - http://etcd-0.etcd:2379
+ - http://etcd-1.etcd:2379
+ - http://etcd-2.etcd:2379
---
# Headless service for the statefulset
apiVersion: v1
diff --git a/kube/m3dbnode-configmap.yaml b/kube/m3dbnode-configmap.yaml
index f51032e5c9..0536215c4a 100644
--- a/kube/m3dbnode-configmap.yaml
+++ b/kube/m3dbnode-configmap.yaml
@@ -53,7 +53,6 @@ data:
gcPercentage: 100
writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
commitlog:
@@ -66,15 +65,16 @@ data:
filesystem:
filePathPrefix: /var/lib/m3db
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - http://etcd-0.etcd:2379
- - http://etcd-1.etcd:2379
- - http://etcd-2.etcd:2379
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - http://etcd-0.etcd:2379
+ - http://etcd-1.etcd:2379
+ - http://etcd-2.etcd:2379
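Both Kubernetes manifests above move the etcd service config under the new discovery key without changing its contents; a minimal rollout sketch, assuming a kubectl context pointed at the target cluster (the statefulset name m3dbnode is illustrative, not taken from this diff):

    # apply the updated ConfigMap, then restart the DB pods so m3dbnode
    # re-reads its config with the new discovery stanza
    kubectl apply -f kube/m3dbnode-configmap.yaml
    kubectl -n m3db rollout restart statefulset m3dbnode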
diff --git a/kube/terraform/main.tf b/kube/terraform/main.tf
index 675805edda..6391658abb 100755
--- a/kube/terraform/main.tf
+++ b/kube/terraform/main.tf
@@ -133,7 +133,7 @@ resource "kubernetes_config_map" "m3dbnode_config" {
namespace = "m3db"
}
data {
- m3dbnode.yml = "coordinator:\n listenAddress: \"0.0.0.0:7201\"\n local:\n namespaces:\n - namespace: default\n type: unaggregated\n retention: 48h\n metrics:\n scope:\n prefix: \"coordinator\"\n prometheus:\n handlerPath: /metrics\n listenAddress: 0.0.0.0:7203\n sanitization: prometheus\n samplingRate: 1.0\n extended: none\n tagOptions:\n idScheme: quoted\n\ndb:\n logging:\n level: info\n\n metrics:\n prometheus:\n handlerPath: /metrics\n sanitization: prometheus\n samplingRate: 1.0\n extended: detailed\n\n listenAddress: 0.0.0.0:9000\n clusterListenAddress: 0.0.0.0:9001\n httpNodeListenAddress: 0.0.0.0:9002\n httpClusterListenAddress: 0.0.0.0:9003\n debugListenAddress: 0.0.0.0:9004\n\n hostID:\n resolver: hostname\n\n client:\n writeConsistencyLevel: majority\n readConsistencyLevel: unstrict_majority\n\n gcPercentage: 100\n\n writeNewSeriesAsync: true\n writeNewSeriesLimitPerSecond: 1048576\n writeNewSeriesBackoffDuration: 2ms\n\n commitlog:\n flushMaxBytes: 524288\n flushEvery: 1s\n queue:\n calculationType: fixed\n size: 2097152\n\n filesystem:\n filePathPrefix: /var/lib/m3db\n\n config:\n service:\n env: default_env\n zone: embedded\n service: m3db\n cacheDir: /var/lib/m3kv\n etcdClusters:\n - zone: embedded\n endpoints:\n - http://etcd-0.etcd:2379\n - http://etcd-1.etcd:2379\n - http://etcd-2.etcd:2379\n"
+ m3dbnode.yml = "coordinator:\n listenAddress: \"0.0.0.0:7201\"\n local:\n namespaces:\n - namespace: default\n type: unaggregated\n retention: 48h\n metrics:\n scope:\n prefix: \"coordinator\"\n prometheus:\n handlerPath: /metrics\n listenAddress: 0.0.0.0:7203\n sanitization: prometheus\n samplingRate: 1.0\n extended: none\n tagOptions:\n idScheme: quoted\n\ndb:\n logging:\n level: info\n\n metrics:\n prometheus:\n handlerPath: /metrics\n sanitization: prometheus\n samplingRate: 1.0\n extended: detailed\n\n listenAddress: 0.0.0.0:9000\n clusterListenAddress: 0.0.0.0:9001\n httpNodeListenAddress: 0.0.0.0:9002\n httpClusterListenAddress: 0.0.0.0:9003\n debugListenAddress: 0.0.0.0:9004\n\n hostID:\n resolver: hostname\n\n client:\n writeConsistencyLevel: majority\n readConsistencyLevel: unstrict_majority\n\n gcPercentage: 100\n\n writeNewSeriesAsync: true\n writeNewSeriesBackoffDuration: 2ms\n\n commitlog:\n flushMaxBytes: 524288\n flushEvery: 1s\n queue:\n calculationType: fixed\n size: 2097152\n\n filesystem:\n filePathPrefix: /var/lib/m3db\n\n config:\n service:\n env: default_env\n zone: embedded\n service: m3db\n cacheDir: /var/lib/m3kv\n etcdClusters:\n - zone: embedded\n endpoints:\n - http://etcd-0.etcd:2379\n - http://etcd-1.etcd:2379\n - http://etcd-2.etcd:2379\n"
}
}
diff --git a/scripts/development/m3_stack/m3dbnode.yml b/scripts/development/m3_stack/m3dbnode.yml
index 4243c971c1..fb3db8d151 100644
--- a/scripts/development/m3_stack/m3dbnode.yml
+++ b/scripts/development/m3_stack/m3dbnode.yml
@@ -1,7 +1,4 @@
db:
- logging:
- level: info
-
tracing:
backend: jaeger
jaeger:
@@ -11,63 +8,29 @@ db:
type: const
param: 1
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
+ # Note: cannot use type: "m3db_single_node" since multiple DB nodes
+ # are sometimes spawned by the m3_stack start script, and as such the
+ # non-seed nodes need to point etcd not to localhost but to
+ # m3db_seed:2379 specifically.
+ discovery:
+ config:
service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - m3db_seed:2379
- seedNodes:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - m3db_seed:2379
+ seedNodes:
initialCluster:
- - hostID: m3db_seed
- endpoint: http://m3db_seed:2380
+ - hostID: m3db_seed
+ endpoint: http://m3db_seed:2380
# proto:
# schemaFilePath: /etc/m3dbnode/schema.proto
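The note above explains why this file keeps explicit etcd endpoints; for a genuinely single-node deployment the same section can collapse to the shorthand discovery type instead. A minimal sketch of that alternative, assuming the m3db_single_node type referenced in the note (exact shape assumed, following the discovery/type layout used elsewhere in this diff):

    # write a single-node config whose discovery section needs no explicit etcd endpoints
    cat > ./m3dbnode-single.yml <<'EOF'
    db:
      hostID:
        resolver: environment
        envVarName: M3DB_HOST_ID
      discovery:
        type: m3db_single_node
    EOF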
diff --git a/scripts/docker-integration-tests/aggregator/m3coordinator.yml b/scripts/docker-integration-tests/aggregator/m3coordinator.yml
index 35a42c248f..10ba278c91 100644
--- a/scripts/docker-integration-tests/aggregator/m3coordinator.yml
+++ b/scripts/docker-integration-tests/aggregator/m3coordinator.yml
@@ -1,21 +1,5 @@
listenAddress: 0.0.0.0:7202
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
-tagOptions:
- idScheme: quoted
-
carbon:
ingester:
listenAddress: "0.0.0.0:7204"
@@ -28,15 +12,7 @@ carbon:
retention: 6h
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- resolution: 10s
- retention: 6h
- - namespace: unagg
- type: unaggregated
- retention: 1s
- client:
+ - client:
config:
service:
env: default_env
@@ -47,8 +23,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
downsample:
rules:
diff --git a/scripts/docker-integration-tests/aggregator/test.sh b/scripts/docker-integration-tests/aggregator/test.sh
index 0c48d2ddd5..8d287df7e0 100755
--- a/scripts/docker-integration-tests/aggregator/test.sh
+++ b/scripts/docker-integration-tests/aggregator/test.sh
@@ -29,7 +29,7 @@ function defer {
trap defer EXIT
echo "Setup DB node"
-setup_single_m3db_node
+AGG_RESOLUTION=10s AGG_RETENTION=6h setup_single_m3db_node
echo "Initializing aggregator topology"
curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
diff --git a/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml b/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml
index db8feefcd7..acc91e156e 100644
--- a/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml
+++ b/scripts/docker-integration-tests/aggregator_legacy/m3coordinator.yml
@@ -1,21 +1,5 @@
listenAddress: 0.0.0.0:7202
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
-tagOptions:
- idScheme: quoted
-
carbon:
ingester:
listenAddress: "0.0.0.0:7204"
@@ -28,15 +12,7 @@ carbon:
retention: 6h
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- resolution: 10s
- retention: 6h
- - namespace: unagg
- type: unaggregated
- retention: 1s
- client:
+ - client:
config:
service:
env: default_env
@@ -47,8 +23,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
downsample:
remoteAggregator:
diff --git a/scripts/docker-integration-tests/aggregator_legacy/test.sh b/scripts/docker-integration-tests/aggregator_legacy/test.sh
index 5a116ececd..4171fd3b09 100755
--- a/scripts/docker-integration-tests/aggregator_legacy/test.sh
+++ b/scripts/docker-integration-tests/aggregator_legacy/test.sh
@@ -22,7 +22,7 @@ function defer {
trap defer EXIT
echo "Setup DB node"
-setup_single_m3db_node
+AGG_RESOLUTION=10s AGG_RETENTION=6h setup_single_m3db_node
echo "Initializing aggregator topology"
curl -vvvsSf -X POST -H "Cluster-Environment-Name: override_test_env" localhost:7201/api/v1/services/m3aggregator/placement/init -d '{
diff --git a/scripts/docker-integration-tests/carbon/m3coordinator.yml b/scripts/docker-integration-tests/carbon/m3coordinator.yml
index ba69263bc6..dcff5f3a08 100644
--- a/scripts/docker-integration-tests/carbon/m3coordinator.yml
+++ b/scripts/docker-integration-tests/carbon/m3coordinator.yml
@@ -1,28 +1,5 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 5s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -33,8 +10,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
carbon:
ingester:
@@ -56,6 +31,3 @@ carbon:
policies:
- resolution: 5s
retention: 10h
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/carbon/test.sh b/scripts/docker-integration-tests/carbon/test.sh
index d88c88a534..3094318a62 100755
--- a/scripts/docker-integration-tests/carbon/test.sh
+++ b/scripts/docker-integration-tests/carbon/test.sh
@@ -19,7 +19,7 @@ function defer {
}
trap defer EXIT
-setup_single_m3db_node
+AGG_RESOLUTION=5s setup_single_m3db_node
function read_carbon {
target=$1
diff --git a/scripts/docker-integration-tests/cold_writes_simple/m3coordinator.yml b/scripts/docker-integration-tests/cold_writes_simple/m3coordinator.yml
index 64ae45cdca..cc33cf4021 100644
--- a/scripts/docker-integration-tests/cold_writes_simple/m3coordinator.yml
+++ b/scripts/docker-integration-tests/cold_writes_simple/m3coordinator.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/common.sh b/scripts/docker-integration-tests/common.sh
index ab54fd4c3a..918ab3b76a 100644
--- a/scripts/docker-integration-tests/common.sh
+++ b/scripts/docker-integration-tests/common.sh
@@ -74,10 +74,32 @@ function setup_single_m3db_node_long_namespaces {
]
}'
+ echo "Updating aggregation options for agg namespace"
+ curl -vvvsSf -X PUT 0.0.0.0:${coordinator_port}/api/v1/services/m3db/namespace -d '{
+ "name": "agg",
+ "options": {
+ "aggregationOptions": {
+ "aggregations": [
+ {
+ "aggregated": true,
+ "attributes": {
+ "resolutionDuration": "30s",
+ "downsampleOptions": { "all": false }
+ }
+ }
+ ]
+ }
+ }
+ }'
+
echo "Wait until placement is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/placement | jq .placement.instances.'${dbnode_id}'.id)" == \"'${dbnode_id}'\" ]'
+ echo "Wait until agg namespace is ready"
+ ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
+ '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace/ready -d "{ \"name\": \"agg\"}" | grep -c true)" -eq 1 ]'
+
wait_for_namespaces
echo "Adding agg2d namespace"
@@ -86,10 +108,31 @@ function setup_single_m3db_node_long_namespaces {
"retentionTime": "48h"
}'
+ echo "Updating aggregation options for agg namespace"
+ curl -vvvsSf -X PUT 0.0.0.0:${coordinator_port}/api/v1/services/m3db/namespace -d '{
+ "name": "agg2d",
+ "options": {
+ "aggregationOptions": {
+ "aggregations": [
+ {
+ "aggregated": true,
+ "attributes": {
+ "resolutionDuration": "1m",
+ "downsampleOptions": { "all": false }
+ }
+ }
+ ]
+ }
+ }
+ }'
+
echo "Wait until agg2d namespace is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace | jq .registry.namespaces.agg2d.indexOptions.enabled)" == true ]'
+ echo "Wait until agg2d namespace is ready"
+ ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
+ '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace/ready -d "{ \"name\": \"agg2d\"}" | grep -c true)" -eq 1 ]'
echo "Wait until bootstrapped"
ATTEMPTS=100 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
@@ -103,6 +146,8 @@ function setup_single_m3db_node {
local dbnode_id=${DBNODE_ID:-m3db_local}
local coordinator_port=${COORDINATOR_PORT:-7201}
local zone=${ZONE:-embedded}
+ local agg_resolution=${AGG_RESOLUTION:-15s}
+ local agg_retention=${AGG_RETENTION:-10h}
echo "Wait for API to be available"
ATTEMPTS=100 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
@@ -112,7 +157,7 @@ function setup_single_m3db_node {
curl -vvvsSf -X POST 0.0.0.0:${coordinator_port}/api/v1/database/create -d '{
"type": "cluster",
"namespaceName": "agg",
- "retentionTime": "6h",
+ "retentionTime": "'${agg_retention}'",
"num_shards": 4,
"replicationFactor": 1,
"hosts": [
@@ -127,10 +172,31 @@ function setup_single_m3db_node {
]
}'
+ echo "Updating aggregation options for agg namespace"
+ curl -vvvsSf -X PUT 0.0.0.0:${coordinator_port}/api/v1/services/m3db/namespace -d '{
+ "name": "agg",
+ "options": {
+ "aggregationOptions": {
+ "aggregations": [
+ {
+ "aggregated": true,
+ "attributes": {
+ "resolutionDuration": "'${agg_resolution}'"
+ }
+ }
+ ]
+ }
+ }
+ }'
+
echo "Wait until placement is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/placement | jq .placement.instances.'${dbnode_id}'.id)" == \"'${dbnode_id}'\" ]'
+ echo "Wait until agg namespace is ready"
+ ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
+ '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace/ready -d "{ \"name\": \"agg\"}" | grep -c true)" -eq 1 ]'
+
wait_for_namespaces
echo "Wait until bootstrapped"
@@ -147,6 +213,8 @@ function setup_two_m3db_nodes {
local dbnode_host_1_health_port=${DBNODE_HEALTH_PORT_01:-9012}
local dbnode_host_2_health_port=${DBNODE_HEALTH_PORT_02:-9022}
local coordinator_port=${COORDINATOR_PORT:-7201}
+ local agg_resolution=${AGG_RESOLUTION:-15s}
+ local agg_retention=${AGG_RETENTION:-10h}
echo "Wait for API to be available"
ATTEMPTS=100 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
@@ -156,7 +224,7 @@ function setup_two_m3db_nodes {
curl -vvvsSf -X POST 0.0.0.0:${coordinator_port}/api/v1/database/create -d '{
"type": "cluster",
"namespaceName": "agg",
- "retentionTime": "6h",
+ "retentionTime": "'${agg_retention}'",
"num_shards": 2,
"replicationFactor": 2,
"hosts": [
@@ -179,10 +247,31 @@ function setup_two_m3db_nodes {
]
}'
+ echo "Updating aggregation options for agg namespace"
+ curl -vvvsSf -X PUT 0.0.0.0:${coordinator_port}/api/v1/services/m3db/namespace -d '{
+ "name": "agg",
+ "options": {
+ "aggregationOptions": {
+ "aggregations": [
+ {
+ "aggregated": true,
+ "attributes": {
+ "resolutionDuration": "'${agg_resolution}'"
+ }
+ }
+ ]
+ }
+ }
+ }'
+
echo "Wait until placement is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/placement | jq .placement.instances.'"${dbnode_id_1}"'.id)" == \"'"${dbnode_id_1}"'\" ]'
+ echo "Wait until agg namespace is ready"
+ ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
+ '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace/ready -d "{ \"name\": \"agg\"}" | grep -c true)" -eq 1 ]'
+
wait_for_namespaces
echo "Wait until bootstrapped"
@@ -194,6 +283,7 @@ function setup_two_m3db_nodes {
function wait_for_namespaces {
local coordinator_port=${COORDINATOR_PORT:-7201}
+ local unagg_retention=${UNAGG_RETENTION:-10h}
echo "Wait until agg namespace is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
@@ -202,13 +292,17 @@ function wait_for_namespaces {
echo "Adding unagg namespace"
curl -vvvsSf -X POST 0.0.0.0:${coordinator_port}/api/v1/database/namespace/create -d '{
"namespaceName": "unagg",
- "retentionTime": "6h"
+ "retentionTime": "'${unagg_retention}'"
}'
echo "Wait until unagg namespace is init'd"
ATTEMPTS=10 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
'[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace | jq .registry.namespaces.unagg.indexOptions.enabled)" == true ]'
+ echo "Wait until unagg namespace is ready"
+ ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff \
+ '[ "$(curl -sSf 0.0.0.0:'"${coordinator_port}"'/api/v1/services/m3db/namespace/ready -d "{ \"name\": \"unagg\"}" | grep -c true)" -eq 1 ]'
+
echo "Adding coldWritesRepairAndNoIndex namespace"
curl -vvvsSf -X POST 0.0.0.0:${coordinator_port}/api/v1/services/m3db/namespace -d '{
"name": "coldWritesRepairAndNoIndex",
@@ -227,6 +321,11 @@ function wait_for_namespaces {
"bufferPastDuration": "10m",
"blockDataExpiry": true,
"blockDataExpiryAfterNotAccessPeriodDuration": "5m"
+ },
+ "aggregationOptions": {
+ "aggregations": [
+ { "aggregated": false }
+ ]
}
}
}'
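With the resolution/retention knobs and readiness checks added to common.sh above, individual tests override the defaults per invocation instead of editing the shared functions; a minimal usage sketch, mirroring the calls these scripts make (coordinator on 0.0.0.0:7201, as in the defaults above):

    # create the cluster with a 30s aggregated resolution and 24h/10m retentions
    AGG_RESOLUTION=30s AGG_RETENTION=24h UNAGG_RETENTION=10m setup_single_m3db_node

    # the readiness probe the setup functions poll can also be issued by hand
    curl -sSf 0.0.0.0:7201/api/v1/services/m3db/namespace/ready -d '{ "name": "agg" }'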
diff --git a/scripts/docker-integration-tests/coordinator_config_rules/m3coordinator.yml b/scripts/docker-integration-tests/coordinator_config_rules/m3coordinator.yml
index 1ad54297c1..952c9a7ddf 100644
--- a/scripts/docker-integration-tests/coordinator_config_rules/m3coordinator.yml
+++ b/scripts/docker-integration-tests/coordinator_config_rules/m3coordinator.yml
@@ -1,36 +1,5 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 24h
- resolution: 30s
- downsample:
- all: false
- - namespace: agg2d
- type: aggregated
- retention: 48h
- resolution: 1m
- downsample:
- all: false
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -41,8 +10,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
downsample:
rules:
@@ -77,6 +44,3 @@ downsample:
bufferPastLimits:
- resolution: 0s
bufferPast: 90s
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/m3dbnode.yml b/scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/m3dbnode.yml
index d94b0d3993..11795536c2 100644
--- a/scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/m3dbnode.yml
+++ b/scripts/docker-integration-tests/dedicated_etcd_embedded_coordinator/m3dbnode.yml
@@ -18,57 +18,14 @@ coordinator:
idScheme: quoted
db:
- logging:
- level: info
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
+ discovery:
+ type: m3db_cluster
+ m3dbCluster:
env: foo-namespace/foo-cluster
zone: bar-zone
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: bar-zone
- endpoints:
- - etcd01:2379
+ endpoints:
+ - etcd01:2379
diff --git a/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-a.yml b/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-a.yml
index 1a165ab6b9..e393f2ef22 100644
--- a/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-a.yml
+++ b/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-a.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_a_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-b.yml b/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-b.yml
index 0f434a0865..7933a84078 100644
--- a/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-b.yml
+++ b/scripts/docker-integration-tests/multi_cluster_write/m3coordinator-cluster-b.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_b_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-a.yml b/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-a.yml
index 5527f12830..13aac06336 100644
--- a/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-a.yml
+++ b/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-a.yml
@@ -1,36 +1,9 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
config:
services:
- service:
@@ -53,39 +26,18 @@ db:
- cluster_b_dbnode01:2379
async: true
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_a_dbnode01:2379
- seedNodes:
- initialCluster:
- - hostID: cluster_a_m3db_local_1
- endpoint: http://cluster_a_dbnode01:2380
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_a_dbnode01:2379
+ seedNodes:
+ initialCluster:
+ - hostID: cluster_a_m3db_local_1
+ endpoint: http://cluster_a_dbnode01:2380
diff --git a/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-b.yml b/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-b.yml
index edbfcc298b..9e59d22898 100644
--- a/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-b.yml
+++ b/scripts/docker-integration-tests/multi_cluster_write/m3dbnode-cluster-b.yml
@@ -1,70 +1,20 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_b_dbnode01:2379
- seedNodes:
- initialCluster:
- - hostID: cluster_b_m3db_local_1
- endpoint: http://cluster_b_dbnode01:2380
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_b_dbnode01:2379
+ seedNodes:
+ initialCluster:
+ - hostID: cluster_b_m3db_local_1
+ endpoint: http://cluster_b_dbnode01:2380
diff --git a/scripts/docker-integration-tests/prometheus/m3coordinator.yml b/scripts/docker-integration-tests/prometheus/m3coordinator.yml
index f9d46f31b3..cd1a97eb82 100644
--- a/scripts/docker-integration-tests/prometheus/m3coordinator.yml
+++ b/scripts/docker-integration-tests/prometheus/m3coordinator.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -37,11 +14,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
query:
restrictTags:
diff --git a/scripts/docker-integration-tests/prometheus_replication/m3coordinator01.yml b/scripts/docker-integration-tests/prometheus_replication/m3coordinator01.yml
index 83d61c1b74..426653f6cc 100644
--- a/scripts/docker-integration-tests/prometheus_replication/m3coordinator01.yml
+++ b/scripts/docker-integration-tests/prometheus_replication/m3coordinator01.yml
@@ -1,33 +1,10 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
writeForwarding:
promRemoteWrite:
targets:
- url: http://coordinator02:7201/api/v1/prom/remote/write
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -38,8 +15,3 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/prometheus_replication/m3coordinator02.yml b/scripts/docker-integration-tests/prometheus_replication/m3coordinator02.yml
index 8d15b050d8..d7a81d02b4 100644
--- a/scripts/docker-integration-tests/prometheus_replication/m3coordinator02.yml
+++ b/scripts/docker-integration-tests/prometheus_replication/m3coordinator02.yml
@@ -1,28 +1,5 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -33,8 +10,3 @@ clusters:
- zone: embedded
endpoints:
- dbnode02:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
index a3ef780416..d9703deed2 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-a.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
# Fanout queries to remote clusters
rpc:
enabled: true
@@ -24,15 +9,7 @@ rpc:
remoteListenAddresses: ["coordinator-cluster-c:7202"]
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 5s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -43,8 +20,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode-cluster-a:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
carbon:
ingester:
@@ -55,9 +30,6 @@ carbon:
- resolution: 5s
retention: 10h
-tagOptions:
- idScheme: quoted
-
# Use tag consolidation here; other integration tests handle id consolidations.
query:
consolidation:
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
index 2464d98685..7e27bb0a00 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-b.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
# Fanout queries to remote clusters
rpc:
enabled: true
@@ -24,15 +9,7 @@ rpc:
remoteListenAddresses: ["coordinator-cluster-c:7202"]
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 5s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -43,8 +20,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode-cluster-b:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
carbon:
ingester:
@@ -54,10 +29,6 @@ carbon:
policies:
- resolution: 5s
retention: 10h
-
-tagOptions:
- idScheme: quoted
-
query:
consolidation:
matchType: tags
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
index 034036b6ef..2ec60faa05 100644
--- a/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
+++ b/scripts/docker-integration-tests/query_fanout/m3coordinator-cluster-c.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
# Fanout queries to remote clusters
rpc:
enabled: true
@@ -24,15 +9,7 @@ rpc:
remoteListenAddresses: ["coordinator-cluster-b:7202"]
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 5s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -43,8 +20,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode-cluster-c:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
carbon:
ingester:
@@ -55,9 +30,6 @@ carbon:
- resolution: 5s
retention: 10h
-tagOptions:
- idScheme: quoted
-
query:
consolidation:
matchType: tags
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/query_fanout/test.sh b/scripts/docker-integration-tests/query_fanout/test.sh
index 5a4321e2dc..413ee4fc53 100755
--- a/scripts/docker-integration-tests/query_fanout/test.sh
+++ b/scripts/docker-integration-tests/query_fanout/test.sh
@@ -28,13 +28,13 @@ function defer {
}
trap defer EXIT
-DBNODE_HOST=dbnode-cluster-a DBDNODE_PORT=9000 DBNODE_HEALTH_PORT=9002 COORDINATOR_PORT=7201 \
+AGG_RESOLUTION=5s DBNODE_HOST=dbnode-cluster-a DBDNODE_PORT=9000 DBNODE_HEALTH_PORT=9002 COORDINATOR_PORT=7201 \
setup_single_m3db_node
-DBNODE_HOST=dbnode-cluster-b DBDNODE_PORT=19000 DBNODE_HEALTH_PORT=19002 COORDINATOR_PORT=17201 \
+AGG_RESOLUTION=5s DBNODE_HOST=dbnode-cluster-b DBDNODE_PORT=19000 DBNODE_HEALTH_PORT=19002 COORDINATOR_PORT=17201 \
setup_single_m3db_node
-DBNODE_HOST=dbnode-cluster-c DBDNODE_PORT=29000 DBNODE_HEALTH_PORT=29002 COORDINATOR_PORT=27201 \
+AGG_RESOLUTION=5s DBNODE_HOST=dbnode-cluster-c DBDNODE_PORT=29000 DBNODE_HEALTH_PORT=29002 COORDINATOR_PORT=27201 \
setup_single_m3db_node
echo "Write data to cluster a"
diff --git a/scripts/docker-integration-tests/repair/m3coordinator.yml b/scripts/docker-integration-tests/repair/m3coordinator.yml
index 64ae45cdca..cc33cf4021 100644
--- a/scripts/docker-integration-tests/repair/m3coordinator.yml
+++ b/scripts/docker-integration-tests/repair/m3coordinator.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/repair/m3dbnode.yml b/scripts/docker-integration-tests/repair/m3dbnode.yml
index b6f5cb5436..29dcc22d4b 100644
--- a/scripts/docker-integration-tests/repair/m3dbnode.yml
+++ b/scripts/docker-integration-tests/repair/m3dbnode.yml
@@ -1,77 +1,26 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
+ discovery:
+ config:
service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - dbnode01:2379
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - dbnode01:2379
seedNodes:
- initialCluster:
- - hostID: m3db_local_1
- endpoint: http://dbnode01:2380
+ initialCluster:
+ - hostID: m3db_local_1
+ endpoint: http://dbnode01:2380
# Enable repairs.
repair:
enabled: true
throttle: 1ms
checkInterval: 1ms
-
diff --git a/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-a.yml b/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-a.yml
index 1a165ab6b9..6695f7be3c 100644
--- a/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-a.yml
+++ b/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-a.yml
@@ -18,15 +18,7 @@ limits:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +29,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_a_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-b.yml b/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-b.yml
index 0f434a0865..7933a84078 100644
--- a/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-b.yml
+++ b/scripts/docker-integration-tests/repair_and_replication/m3coordinator-cluster-b.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_b_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-a.yml b/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-a.yml
index 93dedab49d..3c3b91f4bf 100644
--- a/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-a.yml
+++ b/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-a.yml
@@ -1,73 +1,23 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_a_dbnode01:2379
- seedNodes:
- initialCluster:
- - hostID: cluster_a_m3db_local_1
- endpoint: http://cluster_a_dbnode01:2380
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_a_dbnode01:2379
+ seedNodes:
+ initialCluster:
+ - hostID: cluster_a_m3db_local_1
+ endpoint: http://cluster_a_dbnode01:2380
# Enable repairs (within cluster a).
repair:
diff --git a/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-b.yml b/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-b.yml
index b2c5b2c650..920ab4db81 100644
--- a/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-b.yml
+++ b/scripts/docker-integration-tests/repair_and_replication/m3dbnode-cluster-b.yml
@@ -1,73 +1,23 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
+ discovery:
+ config:
service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_b_dbnode01:2379
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_b_dbnode01:2379
seedNodes:
- initialCluster:
- - hostID: cluster_b_m3db_local_1
- endpoint: http://cluster_b_dbnode01:2380
+ initialCluster:
+ - hostID: cluster_b_m3db_local_1
+ endpoint: http://cluster_b_dbnode01:2380
# Enable repairs (within cluster b).
repair:
diff --git a/scripts/docker-integration-tests/replication/m3coordinator-cluster-a.yml b/scripts/docker-integration-tests/replication/m3coordinator-cluster-a.yml
index 1a165ab6b9..e393f2ef22 100644
--- a/scripts/docker-integration-tests/replication/m3coordinator-cluster-a.yml
+++ b/scripts/docker-integration-tests/replication/m3coordinator-cluster-a.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_a_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/replication/m3coordinator-cluster-b.yml b/scripts/docker-integration-tests/replication/m3coordinator-cluster-b.yml
index 0f434a0865..7933a84078 100644
--- a/scripts/docker-integration-tests/replication/m3coordinator-cluster-b.yml
+++ b/scripts/docker-integration-tests/replication/m3coordinator-cluster-b.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10h
- client:
+ - client:
config:
service:
env: default_env
@@ -37,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- cluster_b_dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/scripts/docker-integration-tests/replication/m3dbnode-cluster-a.yml b/scripts/docker-integration-tests/replication/m3dbnode-cluster-a.yml
index 9ba35ef331..52e75125c5 100644
--- a/scripts/docker-integration-tests/replication/m3dbnode-cluster-a.yml
+++ b/scripts/docker-integration-tests/replication/m3dbnode-cluster-a.yml
@@ -1,73 +1,23 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
+ discovery:
+ config:
service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_a_dbnode01:2379
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_a_dbnode01:2379
seedNodes:
- initialCluster:
- - hostID: cluster_a_m3db_local_1
- endpoint: http://cluster_a_dbnode01:2380
+ initialCluster:
+ - hostID: cluster_a_m3db_local_1
+ endpoint: http://cluster_a_dbnode01:2380
# Disable repairs (within cluster a).
repair:
@@ -91,4 +41,3 @@ db:
- zone: embedded
endpoints:
- cluster_b_dbnode01:2379
-
diff --git a/scripts/docker-integration-tests/replication/m3dbnode-cluster-b.yml b/scripts/docker-integration-tests/replication/m3dbnode-cluster-b.yml
index c3097f8df8..2d04a09719 100644
--- a/scripts/docker-integration-tests/replication/m3dbnode-cluster-b.yml
+++ b/scripts/docker-integration-tests/replication/m3dbnode-cluster-b.yml
@@ -1,73 +1,23 @@
db:
- logging:
- level: info
-
- tracing:
- backend: jaeger
- jaeger:
- reporter:
- localAgentHostPort: jaeger:6831
- sampler:
- type: const
- param: 1
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: environment
envVarName: M3DB_HOST_ID
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
+ discovery:
+ config:
service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - cluster_b_dbnode01:2379
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - cluster_b_dbnode01:2379
seedNodes:
- initialCluster:
- - hostID: cluster_b_m3db_local_1
- endpoint: http://cluster_b_dbnode01:2380
+ initialCluster:
+ - hostID: cluster_b_m3db_local_1
+ endpoint: http://cluster_b_dbnode01:2380
# Disable repairs (within cluster b).
repair:
@@ -93,4 +43,3 @@ db:
- zone: embedded
endpoints:
- cluster_a_dbnode01:2379
-
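The recurring change across these dbnode test configs is structural: the etcd `service` and `seedNodes` blocks now nest under `db.discovery.config` instead of sitting directly under `db`, which lines up with the new `discovery` import added to the config package later in this diff. A minimal sketch of the new nesting, using illustrative stand-in structs (the real ones live in `src/cmd/services/m3dbnode/config` and `src/dbnode/discovery`) and the `gopkg.in/yaml.v2` parser:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// sketchConfig mirrors only the shape shown in the YAML above; field
// names follow the diff, not the full m3 configuration structs.
type sketchConfig struct {
	DB struct {
		Discovery struct {
			Config struct {
				Service struct {
					Env     string `yaml:"env"`
					Zone    string `yaml:"zone"`
					Service string `yaml:"service"`
				} `yaml:"service"`
			} `yaml:"config"`
		} `yaml:"discovery"`
	} `yaml:"db"`
}

func main() {
	raw := []byte(`
db:
  discovery:
    config:
      service:
        env: default_env
        zone: embedded
        service: m3db
`)
	var cfg sketchConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.DB.Discovery.Config.Service.Env) // prints: default_env
}
```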
diff --git a/scripts/docker-integration-tests/simple_v2_batch_apis/m3coordinator.yml b/scripts/docker-integration-tests/simple_v2_batch_apis/m3coordinator.yml
index 68f407d984..7d5cb12598 100644
--- a/scripts/docker-integration-tests/simple_v2_batch_apis/m3coordinator.yml
+++ b/scripts/docker-integration-tests/simple_v2_batch_apis/m3coordinator.yml
@@ -1,32 +1,9 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
limits:
perQuery:
maxFetchedSeries: 100
clusters:
- - namespaces:
- - namespace: agg
- type: aggregated
- retention: 10h
- resolution: 15s
- - namespace: unagg
- type: unaggregated
- retention: 10m
- client:
+ - client:
config:
service:
env: default_env
@@ -37,9 +14,4 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
useV2BatchAPIs: true
-
-tagOptions:
- idScheme: quoted
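All of the coordinator test configs above shrink the same way: the per-query limits and etcd endpoints stay, while `listenAddress`, `logging`, `metrics`, the namespace list, consistency levels, and `tagOptions` are dropped in favor of built-in defaults. A minimal sketch of the nil-pointer-falls-back-to-default idiom these trims rely on; the helper name and constant are illustrative, not the real m3 API:

```go
package main

import "fmt"

// When a field is omitted from YAML it stays nil, and the server
// substitutes a compiled-in default. The constant mirrors the address
// removed from these files.
const defaultListenAddress = "0.0.0.0:7201"

func listenAddressOrDefault(v *string) string {
	if v == nil {
		return defaultListenAddress
	}
	return *v
}

func main() {
	var unset *string
	fmt.Println(listenAddressOrDefault(unset)) // 0.0.0.0:7201

	custom := "127.0.0.1:7201"
	fmt.Println(listenAddressOrDefault(&custom)) // 127.0.0.1:7201
}
```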
diff --git a/scripts/install-gometalinter.sh b/scripts/install-gometalinter.sh
deleted file mode 100755
index 07612af0a6..0000000000
--- a/scripts/install-gometalinter.sh
+++ /dev/null
@@ -1,391 +0,0 @@
-#!/bin/sh
-#
-# Taken from: https://github.com/alecthomas/gometalinter/blob/master/scripts/install.sh
-
-set -e
-# Code generated by godownloader on 2018-05-08T08:53:30Z. DO NOT EDIT.
-#
-
-usage() {
- this=$1
- cat <<EOF
-}
-echoerr() {
- echo "$@" 1>&2
-}
-log_prefix() {
- echo "$0"
-}
-_logp=6
-log_set_priority() {
- _logp="$1"
-}
-log_priority() {
- if test -z "$1"; then
- echo "$_logp"
- return
- fi
- [ "$1" -le "$_logp" ]
-}
-log_tag() {
- case $1 in
- 0) echo "emerg" ;;
- 1) echo "alert" ;;
- 2) echo "crit" ;;
- 3) echo "err" ;;
- 4) echo "warning" ;;
- 5) echo "notice" ;;
- 6) echo "info" ;;
- 7) echo "debug" ;;
- *) echo "$1" ;;
- esac
-}
-log_debug() {
- log_priority 7 || return 0
- echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
-}
-log_info() {
- log_priority 6 || return 0
- echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
-}
-log_err() {
- log_priority 3 || return 0
- echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
-}
-log_crit() {
- log_priority 2 || return 0
- echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
-}
-uname_os() {
- os=$(uname -s | tr '[:upper:]' '[:lower:]')
- case "$os" in
- msys_nt) os="windows" ;;
- esac
- echo "$os"
-}
-uname_arch() {
- arch=$(uname -m)
- case $arch in
- x86_64) arch="amd64" ;;
- x86) arch="386" ;;
- i686) arch="386" ;;
- i386) arch="386" ;;
- aarch64) arch="arm64" ;;
- armv5*) arch="armv5" ;;
- armv6*) arch="armv6" ;;
- armv7*) arch="armv7" ;;
- esac
- echo ${arch}
-}
-uname_os_check() {
- os=$(uname_os)
- case "$os" in
- darwin) return 0 ;;
- dragonfly) return 0 ;;
- freebsd) return 0 ;;
- linux) return 0 ;;
- android) return 0 ;;
- nacl) return 0 ;;
- netbsd) return 0 ;;
- openbsd) return 0 ;;
- plan9) return 0 ;;
- solaris) return 0 ;;
- windows) return 0 ;;
- esac
- log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
- return 1
-}
-uname_arch_check() {
- arch=$(uname_arch)
- case "$arch" in
- 386) return 0 ;;
- amd64) return 0 ;;
- arm64) return 0 ;;
- armv5) return 0 ;;
- armv6) return 0 ;;
- armv7) return 0 ;;
- ppc64) return 0 ;;
- ppc64le) return 0 ;;
- mips) return 0 ;;
- mipsle) return 0 ;;
- mips64) return 0 ;;
- mips64le) return 0 ;;
- s390x) return 0 ;;
- amd64p32) return 0 ;;
- esac
- log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
- return 1
-}
-untar() {
- tarball=$1
- case "${tarball}" in
- *.tar.gz | *.tgz) tar -xzf "${tarball}" ;;
- *.tar) tar -xf "${tarball}" ;;
- *.zip) unzip "${tarball}" ;;
- *)
- log_err "untar unknown archive format for ${tarball}"
- return 1
- ;;
- esac
-}
-mktmpdir() {
- test -z "$TMPDIR" && TMPDIR="$(mktemp -d)"
- mkdir -p "${TMPDIR}"
- echo "${TMPDIR}"
-}
-http_download_curl() {
- local_file=$1
- source_url=$2
- header=$3
- if [ -z "$header" ]; then
- code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
- else
- code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
- fi
- if [ "$code" != "200" ]; then
- log_debug "http_download_curl received HTTP status $code"
- return 1
- fi
- return 0
-}
-http_download_wget() {
- local_file=$1
- source_url=$2
- header=$3
- if [ -z "$header" ]; then
- wget -q -O "$local_file" "$source_url"
- else
- wget -q --header "$header" -O "$local_file" "$source_url"
- fi
-}
-http_download() {
- log_debug "http_download $2"
- if is_command curl; then
- http_download_curl "$@"
- return
- elif is_command wget; then
- http_download_wget "$@"
- return
- fi
- log_crit "http_download unable to find wget or curl"
- return 1
-}
-http_copy() {
- tmp=$(mktemp)
- http_download "${tmp}" "$1" "$2" || return 1
- body=$(cat "$tmp")
- rm -f "${tmp}"
- echo "$body"
-}
-github_release() {
- owner_repo=$1
- version=$2
- test -z "$version" && version="latest"
- giturl="https://github.com/${owner_repo}/releases/${version}"
- json=$(http_copy "$giturl" "Accept:application/json")
- test -z "$json" && return 1
- version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
- test -z "$version" && return 1
- echo "$version"
-}
-hash_sha256() {
- TARGET=${1:-/dev/stdin}
- if is_command gsha256sum; then
- hash=$(gsha256sum "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command sha256sum; then
- hash=$(sha256sum "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command shasum; then
- hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command openssl; then
- hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f a
- else
- log_crit "hash_sha256 unable to find command to compute sha-256 hash"
- return 1
- fi
-}
-hash_sha256_verify() {
- TARGET=$1
- checksums=$2
- if [ -z "$checksums" ]; then
- log_err "hash_sha256_verify checksum file not specified in arg2"
- return 1
- fi
- BASENAME=${TARGET##*/}
- want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
- if [ -z "$want" ]; then
- log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
- return 1
- fi
- got=$(hash_sha256 "$TARGET")
- if [ "$want" != "$got" ]; then
- log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
- return 1
- fi
-}
-cat /dev/null <<EOF
-	fmt.Fprintf(os.Stderr, "\nExample: %[1]s /var/run/lockfile 1 1\n",
- path.Base(os.Args[0]))
- os.Exit(1)
-}
-
-func main() {
- if len(os.Args) != 4 {
- exitWithUsage()
- }
-
- path, sleepStr, rmLock := os.Args[1], os.Args[2], os.Args[3]
- sleep, err := strconv.Atoi(sleepStr)
- if err != nil {
- exitWithUsage()
- }
-
- lock, err := lockfile.Acquire(path)
- if err != nil {
- os.Exit(1)
- }
-
- if sleep > 0 {
- time.Sleep(time.Duration(sleep) * time.Second)
- }
-
- if rmLock != "0" {
- lock.Release()
- }
-}
diff --git a/scripts/run-ci-lint.sh b/scripts/run-ci-lint.sh
new file mode 100755
index 0000000000..e0a331c74c
--- /dev/null
+++ b/scripts/run-ci-lint.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -eo pipefail
+
+BIN="$1"
+TARGET="$2"
+
+if [[ ! -x "$BIN" ]]; then
+ echo "$BIN is not a binary"
+ exit 1
+fi
+
+TAGS=()
+if [[ -n "$GO_BUILD_TAGS" ]]; then
+ TAGS=("--build-tags" "${GO_BUILD_TAGS}")
+fi
+
+"$BIN" run "$TARGET" "${TAGS[@]}"
diff --git a/site/content/docs/operational_guide/availability_consistency_durability.md b/site/content/docs/operational_guide/availability_consistency_durability.md
index 873ed4c6aa..1f14ac16ee 100644
--- a/site/content/docs/operational_guide/availability_consistency_durability.md
+++ b/site/content/docs/operational_guide/availability_consistency_durability.md
@@ -57,10 +57,12 @@ This instructs M3DB to handle writes for new timeseries (for a given time block)
However, since new time series are created asynchronously, there may be a brief delay between when a write is acknowledged by the client and when that series becomes available for subsequent reads.
-M3DB also allows operators to rate limit the number of new series that can be created per second via the following configuration:
+M3DB also allows operators to rate limit the number of new series that can be created per second via the following configuration under the `db.limits` section:
```yaml
-writeNewSeriesLimitPerSecond: 1048576
+db:
+ limits:
+ writeNewSeriesPerSecond: 1048576
```
This value can be set much lower than the default value for workloads in which a significant increase in cardinality usually indicates a misbehaving caller.
@@ -105,10 +107,12 @@ writeNewSeriesAsync: false
This instructs M3DB to handle writes for new timeseries (for a given time block) synchronously. Creating a new timeseries in memory is much more expensive than simply appending a new write to an existing series, so this configuration could have an adverse effect on performance when many new timeseries are being inserted into M3DB concurrently.
-Since this operation is so expensive, M3DB allows operator to rate limit the number of new series that can be created per second via the following configuration (also a top-level key under the `db` section):
+Since this operation is so expensive, M3DB allows operators to rate limit the number of new series that can be created per second via the following configuration (under the `db.limits` section):
```yaml
-writeNewSeriesLimitPerSecond: 1048576
+db:
+ limits:
+ writeNewSeriesPerSecond: 1048576
```
### Ignoring Corrupt Commitlogs on Bootstrap
diff --git a/src/aggregator/aggregator/election_mgr.go b/src/aggregator/aggregator/election_mgr.go
index 8c685cf2bc..38aa4d0f01 100644
--- a/src/aggregator/aggregator/election_mgr.go
+++ b/src/aggregator/aggregator/election_mgr.go
@@ -572,6 +572,20 @@ func (mgr *electionManager) campaignState() campaignState {
return mgr.campaignStateWatchable.Get().(campaignState)
}
+func (mgr *electionManager) checkCampaignState() {
+ enabled, err := mgr.campaignIsEnabledFn()
+ if err != nil {
+ mgr.metrics.campaignCheckErrors.Inc(1)
+ return
+ }
+ newState := newCampaignState(enabled)
+ currState := mgr.campaignState()
+ if currState == newState {
+ return
+ }
+ mgr.processCampaignStateChange(newState)
+}
+
func (mgr *electionManager) checkCampaignStateLoop() {
defer mgr.Done()
@@ -579,19 +593,9 @@ func (mgr *electionManager) checkCampaignStateLoop() {
defer ticker.Stop()
for {
+ mgr.checkCampaignState()
select {
case <-ticker.C:
- enabled, err := mgr.campaignIsEnabledFn()
- if err != nil {
- mgr.metrics.campaignCheckErrors.Inc(1)
- continue
- }
- newState := newCampaignState(enabled)
- currState := mgr.campaignState()
- if currState == newState {
- continue
- }
- mgr.processCampaignStateChange(newState)
case <-mgr.doneCh:
return
}
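Extracting the loop body into `checkCampaignState` and hoisting the call above the `select` changes the loop's timing: the campaign state is now evaluated immediately on loop entry and again after every tick, instead of only after the first interval elapses. A cut-down sketch of the same shape, with stand-ins for the manager's check function and configured interval:

```go
package main

import (
	"fmt"
	"time"
)

// checkLoop mirrors the refactored checkCampaignStateLoop: one check up
// front, then one per tick, exiting when done is closed.
func checkLoop(interval time.Duration, check func(), done <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		check() // evaluated immediately on entry, then after every tick
		select {
		case <-ticker.C:
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(35 * time.Millisecond)
		close(done)
	}()
	n := 0
	checkLoop(10*time.Millisecond, func() { n++ }, done)
	fmt.Println(n > 1) // true: no full tick elapsed before the first check
}
```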
diff --git a/src/aggregator/aggregator/election_mgr_test.go b/src/aggregator/aggregator/election_mgr_test.go
index fbb5bbbc75..d88dc70842 100644
--- a/src/aggregator/aggregator/election_mgr_test.go
+++ b/src/aggregator/aggregator/election_mgr_test.go
@@ -134,6 +134,9 @@ func TestElectionManagerOpenSuccess(t *testing.T) {
return make(chan campaign.Status), nil
}).
AnyTimes()
+ leaderService.EXPECT().
+ Resign(gomock.Any()).
+ AnyTimes()
opts := testElectionManagerOptions(t, ctrl).SetLeaderService(leaderService)
mgr := NewElectionManager(opts).(*electionManager)
@@ -245,6 +248,7 @@ func TestElectionManagerResignLeaderServiceResignError(t *testing.T) {
}
func TestElectionManagerResignTimeout(t *testing.T) {
+ t.Parallel()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -272,14 +276,16 @@ func TestElectionManagerResignTimeout(t *testing.T) {
}
func TestElectionManagerResignSuccess(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var (
- statusCh = make(chan campaign.Status, 2)
+ statusCh = make(chan campaign.Status, 1)
mgr *electionManager
)
@@ -290,7 +296,10 @@ func TestElectionManagerResignSuccess(t *testing.T) {
leaderService.EXPECT().
Resign(gomock.Any()).
DoAndReturn(func(string) error {
- statusCh <- campaign.Status{State: campaign.Follower}
+ select {
+ case statusCh <- campaign.Status{State: campaign.Follower}:
+ default:
+ }
return nil
}).
AnyTimes()
@@ -300,6 +309,7 @@ func TestElectionManagerResignSuccess(t *testing.T) {
campaignOpts = campaignOpts.SetLeaderValue(leaderValue)
opts := testElectionManagerOptions(t, ctrl).
SetCampaignOptions(campaignOpts).
+ // SetCampaignStateCheckInterval(1 * time.Second).
SetLeaderService(leaderService)
i := placement.NewInstance().SetID("myself")
opts.PlacementManager().(*MockPlacementManager).
@@ -319,9 +329,20 @@ func TestElectionManagerResignSuccess(t *testing.T) {
require.NoError(t, mgr.Open(testShardSetID))
require.NoError(t, mgr.Resign(ctx))
- time.Sleep(time.Second)
+
+ var mgrState electionManagerState
+ for i := 0; i < 10; i++ {
+ mgr.RLock()
+ mgrState = mgr.state
+ mgr.RUnlock()
+ if mgr.ElectionState() == FollowerState && mgrState == electionManagerOpen {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
require.Equal(t, FollowerState, mgr.ElectionState())
- require.Equal(t, electionManagerOpen, mgr.state)
+ require.Equal(t, electionManagerOpen, mgrState)
require.NoError(t, mgr.Close())
}
@@ -346,6 +367,8 @@ func TestElectionManagerCloseSuccess(t *testing.T) {
}
func TestElectionManagerCampaignLoop(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -380,6 +403,7 @@ func TestElectionManagerCampaignLoop(t *testing.T) {
campaignOpts = campaignOpts.SetLeaderValue(leaderValue)
opts := testElectionManagerOptions(t, ctrl).
SetCampaignOptions(campaignOpts).
+ SetCampaignStateCheckInterval(100 * time.Millisecond).
SetLeaderService(leaderService)
i := placement.NewInstance().SetID("myself")
opts.PlacementManager().(*MockPlacementManager).
@@ -479,6 +503,8 @@ func TestElectionManagerCampaignLoop(t *testing.T) {
}
func TestElectionManagerVerifyLeaderDelayWithValidLeader(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -544,6 +570,8 @@ func TestElectionManagerVerifyLeaderDelayWithValidLeader(t *testing.T) {
}
func TestElectionManagerVerifyLeaderDelayWithLeaderNotInPlacement(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -603,6 +631,8 @@ func TestElectionManagerVerifyLeaderDelayWithLeaderNotInPlacement(t *testing.T)
}
func TestElectionManagerVerifyLeaderDelayWithLeaderOwningDifferentShardSet(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -667,6 +697,8 @@ func TestElectionManagerVerifyLeaderDelayWithLeaderOwningDifferentShardSet(t *te
}
func TestElectionManagerVerifyWithLeaderErrors(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -815,6 +847,8 @@ func TestElectionManagerVerifyCampaignDisabled(t *testing.T) {
}
func TestElectionManagerCheckCampaignStateLoop(t *testing.T) {
+ t.Parallel()
+
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -834,6 +868,7 @@ func TestElectionManagerCheckCampaignStateLoop(t *testing.T) {
campaignOpts = campaignOpts.SetLeaderValue(leaderValue)
opts := testElectionManagerOptions(t, ctrl).
SetCampaignOptions(campaignOpts).
+ SetCampaignStateCheckInterval(100 * time.Millisecond).
SetLeaderService(leaderService)
mgr := NewElectionManager(opts).(*electionManager)
iterCh := make(chan enabledRes)
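The test changes follow one theme: the slow election-manager tests gain `t.Parallel()`, and fixed `time.Sleep` waits become bounded polling, so the tests assert on observed state transitions instead of wall-clock guesses. A minimal sketch of the polling-helper shape (m3's own `clock.WaitUntil`, used elsewhere in this diff, behaves the same way):

```go
package main

import (
	"fmt"
	"time"
)

// waitUntil polls cond until it returns true or the timeout lapses,
// replacing a fixed sleep with a bounded retry loop.
func waitUntil(cond func() bool, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(100 * time.Millisecond)
	}
	return cond()
}

func main() {
	start := time.Now()
	ok := waitUntil(func() bool {
		return time.Since(start) > 200*time.Millisecond
	}, time.Second)
	fmt.Println(ok) // true, and well before the full timeout
}
```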
diff --git a/src/aggregator/aggregator/map.go b/src/aggregator/aggregator/map.go
index 130c20d680..6d8139390c 100644
--- a/src/aggregator/aggregator/map.go
+++ b/src/aggregator/aggregator/map.go
@@ -35,7 +35,7 @@ import (
"github.com/m3db/m3/src/metrics/metric/aggregated"
"github.com/m3db/m3/src/metrics/metric/unaggregated"
"github.com/m3db/m3/src/x/clock"
- "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/uber-go/tally"
)
@@ -108,7 +108,7 @@ type metricMap struct {
firstInsertAt time.Time
rateLimiter *rate.Limiter
runtimeOpts runtime.Options
- runtimeOptsCloser close.SimpleCloser
+ runtimeOptsCloser xresource.SimpleCloser
sleepFn sleepFn
metrics metricMapMetrics
}
diff --git a/src/aggregator/runtime/options_manager.go b/src/aggregator/runtime/options_manager.go
index 08c26060f2..b2182d81a0 100644
--- a/src/aggregator/runtime/options_manager.go
+++ b/src/aggregator/runtime/options_manager.go
@@ -21,7 +21,7 @@
package runtime
import (
- "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/m3db/m3/src/x/watch"
)
@@ -36,7 +36,7 @@ type OptionsManager interface {
// RegisterWatcher registers a watcher that watches updates to runtime options.
// When an update arrives, the manager will deliver the update to all registered
// watchers.
- RegisterWatcher(l OptionsWatcher) close.SimpleCloser
+ RegisterWatcher(l OptionsWatcher) xresource.SimpleCloser
// Close closes the watcher and all descendent watches
Close()
@@ -72,7 +72,7 @@ func (w *optionsManager) RuntimeOptions() Options {
func (w *optionsManager) RegisterWatcher(
watcher OptionsWatcher,
-) close.SimpleCloser {
+) xresource.SimpleCloser {
_, watch, _ := w.watchable.Watch()
// The watchable is always initialized so it's okay to do a blocking read.
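These two hunks are a mechanical rename: watcher registration still hands back a closer, but the interface now comes from `src/x/resource` as `xresource.SimpleCloser` rather than the old `src/x/close` package. A sketch of the shape involved, assuming the interface is the usual single no-error `Close` method:

```go
package main

import "fmt"

// SimpleCloser is assumed to match the x/resource interface: Close with
// no error return, suitable for fire-and-forget cleanup.
type SimpleCloser interface {
	Close()
}

type watcherHandle struct{ name string }

func (w watcherHandle) Close() { fmt.Println("closed", w.name) }

// registerWatcher is a stand-in for OptionsManager.RegisterWatcher: the
// caller gets back only the ability to stop watching.
func registerWatcher(name string) SimpleCloser {
	return watcherHandle{name: name}
}

func main() {
	closer := registerWatcher("runtime-options-watcher")
	defer closer.Close()
}
```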
diff --git a/src/cluster/client/etcd/client.go b/src/cluster/client/etcd/client.go
index c3218c2edf..88f85e8dee 100644
--- a/src/cluster/client/etcd/client.go
+++ b/src/cluster/client/etcd/client.go
@@ -39,8 +39,8 @@ import (
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/retry"
- "go.etcd.io/etcd/clientv3"
"github.com/uber-go/tally"
+ "go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
)
@@ -282,9 +282,10 @@ func newClient(cluster Cluster) (*clientv3.Client, error) {
return nil, err
}
cfg := clientv3.Config{
+ AutoSyncInterval: cluster.AutoSyncInterval(),
+ DialTimeout: cluster.DialTimeout(),
Endpoints: cluster.Endpoints(),
TLS: tls,
- AutoSyncInterval: cluster.AutoSyncInterval(),
}
if opts := cluster.KeepAliveOptions(); opts.KeepAliveEnabled() {
@@ -296,6 +297,7 @@ func newClient(cluster Cluster) (*clientv3.Client, error) {
}
cfg.DialKeepAliveTime = keepAlivePeriod
cfg.DialKeepAliveTimeout = opts.KeepAliveTimeout()
+ cfg.PermitWithoutStream = true
}
return clientv3.New(cfg)
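Beyond the field reordering, the etcd client gains two behavioral knobs: `DialTimeout` is now threaded through from cluster options, and `PermitWithoutStream: true` lets keepalive pings be sent even when no RPC streams are active, so dead connections are detected during idle periods. A standalone sketch of the resulting `clientv3.Config`, using the new defaults from `options.go` below (the endpoint is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func newEtcdConfig() clientv3.Config {
	return clientv3.Config{
		Endpoints:            []string{"127.0.0.1:2379"}, // placeholder endpoint
		AutoSyncInterval:     time.Minute,                // defaultAutoSyncInterval
		DialTimeout:          15 * time.Second,           // defaultDialTimeout
		DialKeepAliveTime:    20 * time.Second,           // defaultKeepAlivePeriod
		DialKeepAliveTimeout: 10 * time.Second,           // defaultKeepAliveTimeout
		PermitWithoutStream:  true,                       // ping even with no active streams
	}
}

func main() {
	cfg := newEtcdConfig()
	fmt.Println(cfg.DialTimeout, cfg.PermitWithoutStream)
}
```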
diff --git a/src/cluster/client/etcd/config_test.go b/src/cluster/client/etcd/config_test.go
index 62c6af66eb..f2c5bb2f5a 100644
--- a/src/cluster/client/etcd/config_test.go
+++ b/src/cluster/client/etcd/config_test.go
@@ -138,9 +138,9 @@ m3sd:
require.True(t, exists)
keepAliveOpts = cluster2.KeepAliveOptions()
require.Equal(t, true, keepAliveOpts.KeepAliveEnabled())
- require.Equal(t, 5*time.Minute, keepAliveOpts.KeepAlivePeriod())
- require.Equal(t, 5*time.Minute, keepAliveOpts.KeepAlivePeriodMaxJitter())
- require.Equal(t, 20*time.Second, keepAliveOpts.KeepAliveTimeout())
+ require.Equal(t, 20*time.Second, keepAliveOpts.KeepAlivePeriod())
+ require.Equal(t, 10*time.Second, keepAliveOpts.KeepAlivePeriodMaxJitter())
+ require.Equal(t, 10*time.Second, keepAliveOpts.KeepAliveTimeout())
t.Run("TestOptionsNewDirectoryMode", func(t *testing.T) {
opts := cfg.NewOptions()
diff --git a/src/cluster/client/etcd/options.go b/src/cluster/client/etcd/options.go
index e6e3c719de..566d240e10 100644
--- a/src/cluster/client/etcd/options.go
+++ b/src/cluster/client/etcd/options.go
@@ -36,10 +36,13 @@ import (
)
const (
+ defaultAutoSyncInterval = 1 * time.Minute
+ defaultDialTimeout = 15 * time.Second
+
defaultKeepAliveEnabled = true
- defaultKeepAlivePeriod = 5 * time.Minute
- defaultKeepAlivePeriodMaxJitter = 5 * time.Minute
- defaultKeepAliveTimeout = 20 * time.Second
+ defaultKeepAlivePeriod = 20 * time.Second
+ defaultKeepAlivePeriodMaxJitter = 10 * time.Second
+ defaultKeepAliveTimeout = 10 * time.Second
defaultRetryInitialBackoff = 2 * time.Second
defaultRetryBackoffFactor = 2.0
@@ -316,8 +319,10 @@ func (o options) NewDirectoryMode() os.FileMode {
// NewCluster creates a Cluster.
func NewCluster() Cluster {
return cluster{
- keepAliveOpts: NewKeepAliveOptions(),
- tlsOpts: NewTLSOptions(),
+ autoSyncInterval: defaultAutoSyncInterval,
+ dialTimeout: defaultDialTimeout,
+ keepAliveOpts: NewKeepAliveOptions(),
+ tlsOpts: NewTLSOptions(),
}
}
@@ -327,6 +332,7 @@ type cluster struct {
keepAliveOpts KeepAliveOptions
tlsOpts TLSOptions
autoSyncInterval time.Duration
+ dialTimeout time.Duration
}
func (c cluster) Zone() string {
@@ -373,3 +379,15 @@ func (c cluster) SetAutoSyncInterval(autoSyncInterval time.Duration) Cluster {
c.autoSyncInterval = autoSyncInterval
return c
}
+
+//nolint:gocritic
+func (c cluster) DialTimeout() time.Duration {
+ return c.dialTimeout
+}
+
+//nolint:gocritic
+func (c cluster) SetDialTimeout(dialTimeout time.Duration) Cluster {
+ c.dialTimeout = dialTimeout
+
+ return c
+}
diff --git a/src/cluster/client/etcd/options_test.go b/src/cluster/client/etcd/options_test.go
index aee7366fab..befd638960 100644
--- a/src/cluster/client/etcd/options_test.go
+++ b/src/cluster/client/etcd/options_test.go
@@ -32,16 +32,22 @@ import (
)
func TestKeepAliveOptions(t *testing.T) {
- opts := NewKeepAliveOptions().
+ opts := NewKeepAliveOptions()
+ require.Equal(t, defaultKeepAliveEnabled, opts.KeepAliveEnabled())
+ require.Equal(t, defaultKeepAlivePeriod, opts.KeepAlivePeriod())
+ require.Equal(t, defaultKeepAlivePeriodMaxJitter, opts.KeepAlivePeriodMaxJitter())
+ require.Equal(t, defaultKeepAliveTimeout, opts.KeepAliveTimeout())
+
+ opts = NewKeepAliveOptions().
SetKeepAliveEnabled(true).
- SetKeepAlivePeriod(10 * time.Second).
- SetKeepAlivePeriodMaxJitter(5 * time.Second).
- SetKeepAliveTimeout(time.Second)
+ SetKeepAlivePeriod(1234 * time.Second).
+ SetKeepAlivePeriodMaxJitter(5000 * time.Second).
+ SetKeepAliveTimeout(time.Hour)
require.Equal(t, true, opts.KeepAliveEnabled())
- require.Equal(t, 10*time.Second, opts.KeepAlivePeriod())
- require.Equal(t, 5*time.Second, opts.KeepAlivePeriodMaxJitter())
- require.Equal(t, time.Second, opts.KeepAliveTimeout())
+ require.Equal(t, 1234*time.Second, opts.KeepAlivePeriod())
+ require.Equal(t, 5000*time.Second, opts.KeepAlivePeriodMaxJitter())
+ require.Equal(t, time.Hour, opts.KeepAliveTimeout())
}
func TestCluster(t *testing.T) {
@@ -63,6 +69,22 @@ func TestCluster(t *testing.T) {
assert.Equal(t, "z", c.Zone())
assert.Equal(t, []string{"e1"}, c.Endpoints())
assert.Equal(t, aOpts, c.TLSOptions())
+ assert.Equal(t, defaultAutoSyncInterval, c.AutoSyncInterval())
+ assert.Equal(t, defaultDialTimeout, c.DialTimeout())
+
+ c = c.SetAutoSyncInterval(123 * time.Minute)
+ assert.Equal(t, "z", c.Zone())
+ assert.Equal(t, []string{"e1"}, c.Endpoints())
+ assert.Equal(t, aOpts, c.TLSOptions())
+ assert.Equal(t, 123*time.Minute, c.AutoSyncInterval())
+ assert.Equal(t, defaultDialTimeout, c.DialTimeout())
+
+ c = c.SetDialTimeout(42 * time.Hour)
+ assert.Equal(t, "z", c.Zone())
+ assert.Equal(t, []string{"e1"}, c.Endpoints())
+ assert.Equal(t, aOpts, c.TLSOptions())
+ assert.Equal(t, 123*time.Minute, c.AutoSyncInterval())
+ assert.Equal(t, 42*time.Hour, c.DialTimeout())
}
func TestTLSOptions(t *testing.T) {
diff --git a/src/cluster/client/etcd/types.go b/src/cluster/client/etcd/types.go
index ffdc284bac..9c052d1e4f 100644
--- a/src/cluster/client/etcd/types.go
+++ b/src/cluster/client/etcd/types.go
@@ -129,6 +129,9 @@ type Cluster interface {
TLSOptions() TLSOptions
SetTLSOptions(TLSOptions) Cluster
- SetAutoSyncInterval(value time.Duration) Cluster
AutoSyncInterval() time.Duration
+ SetAutoSyncInterval(value time.Duration) Cluster
+
+ DialTimeout() time.Duration
+ SetDialTimeout(value time.Duration) Cluster
}
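With the getter/setter pairs grouped and `DialTimeout` added, callers override individual knobs by chaining off `NewCluster()` and keep the new defaults (15s dial timeout, 1m auto-sync) for anything left untouched. A hypothetical caller, assuming the zone and endpoint setters exercised by the surrounding tests:

```go
package main

import (
	"fmt"
	"time"

	etcdclient "github.com/m3db/m3/src/cluster/client/etcd"
)

func main() {
	c := etcdclient.NewCluster().
		SetZone("embedded").
		SetEndpoints([]string{"127.0.0.1:2379"}).
		SetDialTimeout(30 * time.Second) // override; auto-sync keeps its default
	fmt.Println(c.Zone(), c.DialTimeout(), c.AutoSyncInterval())
}
```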
diff --git a/src/cluster/placement/service/mirrored_custom_groups_test.go b/src/cluster/placement/service/mirrored_custom_groups_test.go
index a27c9df514..1d624aaaef 100644
--- a/src/cluster/placement/service/mirrored_custom_groups_test.go
+++ b/src/cluster/placement/service/mirrored_custom_groups_test.go
@@ -59,7 +59,6 @@ const (
instG3I1 = "g3_i1"
instG3I2 = "g3_i2"
instG3I3 = "g3_i3"
-
)
var (
@@ -205,7 +204,7 @@ func mirroredCustomGroupSelectorSetup(t *testing.T) *mirroredCustomGroupSelector
tctx.Groups = testGroups
opts := placement.NewOptions().
- SetValidZone(zone).
+ SetValidZone(zone).
SetIsMirrored(true)
tctx.Selector = selector.NewMirroredCustomGroupSelector(
@@ -217,7 +216,7 @@ func mirroredCustomGroupSelectorSetup(t *testing.T) *mirroredCustomGroupSelector
tctx.KVStore = mem.NewStore()
tctx.Storage = placementstorage.NewPlacementStorage(tctx.KVStore, "placement", tctx.Opts)
- tctx.Service = NewPlacementService(tctx.Storage, tctx.Opts)
+ tctx.Service = NewPlacementService(tctx.Storage, WithPlacementOptions(tctx.Opts))
return tctx
}
diff --git a/src/cluster/placement/service/operator.go b/src/cluster/placement/service/operator.go
index 75da980fc4..5d253969cc 100644
--- a/src/cluster/placement/service/operator.go
+++ b/src/cluster/placement/service/operator.go
@@ -31,10 +31,10 @@ import (
// given placement.
// If initialPlacement is nil, BuildInitialPlacement must be called before any operations on the
// placement.
-func NewPlacementOperator(initialPlacement placement.Placement, opts placement.Options) placement.Operator {
+func NewPlacementOperator(initialPlacement placement.Placement, opts ...Option) placement.Operator {
store := newDummyStore(initialPlacement)
return &placementOperator{
- placementServiceImpl: newPlacementServiceImpl(opts, store),
+ placementServiceImpl: newPlacementServiceImpl(store, opts...),
store: store,
}
}
@@ -97,4 +97,3 @@ func (d *dummyStore) Placement() (placement.Placement, error) {
}
return d.curPlacement, nil
}
-
diff --git a/src/cluster/placement/service/operator_test.go b/src/cluster/placement/service/operator_test.go
index af3b1b8688..3fcf1132e0 100644
--- a/src/cluster/placement/service/operator_test.go
+++ b/src/cluster/placement/service/operator_test.go
@@ -25,7 +25,6 @@ import (
"testing"
"github.com/m3db/m3/src/cluster/placement"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -33,13 +32,13 @@ import (
func TestOperator(t *testing.T) {
type testDeps struct {
options placement.Options
- op placement.Operator
+ op placement.Operator
}
setup := func(t *testing.T) testDeps {
options := placement.NewOptions().SetAllowAllZones(true)
return testDeps{
options: options,
- op: NewPlacementOperator(nil, options),
+ op: NewPlacementOperator(nil, WithPlacementOptions(options)),
}
}
@@ -60,7 +59,7 @@ func TestOperator(t *testing.T) {
t.Run("end-to-end flow", func(t *testing.T) {
tdeps := setup(t)
- op := NewPlacementOperator(nil, tdeps.options)
+ op := NewPlacementOperator(nil, WithPlacementOptions(tdeps.options))
store := newMockStorage()
pl, err := op.BuildInitialPlacement([]placement.Instance{newTestInstance()}, 10, 1)
@@ -81,7 +80,7 @@ func TestOperator(t *testing.T) {
require.NoError(t, err)
// expect exactly one version increment, from store.SetIfNotExist
- assert.Equal(t, initialVersion + 1, pl.Version())
+ assert.Equal(t, initialVersion+1, pl.Version())
// spot check the results
allAvailable := true
@@ -94,15 +93,15 @@ func TestOperator(t *testing.T) {
})
}
-type dummyStoreTestDeps struct{
+type dummyStoreTestDeps struct {
store *dummyStore
- pl placement.Placement
+ pl placement.Placement
}
func dummyStoreSetup(t *testing.T) dummyStoreTestDeps {
return dummyStoreTestDeps{
store: newDummyStore(nil),
- pl: placement.NewPlacement(),
+ pl: placement.NewPlacement(),
}
}
diff --git a/src/cluster/placement/service/service.go b/src/cluster/placement/service/service.go
index 67873db30f..2ee4331c9c 100644
--- a/src/cluster/placement/service/service.go
+++ b/src/cluster/placement/service/service.go
@@ -27,7 +27,6 @@ import (
"github.com/m3db/m3/src/cluster/placement/algo"
"github.com/m3db/m3/src/cluster/placement/selector"
"github.com/m3db/m3/src/cluster/shard"
-
"go.uber.org/zap"
)
@@ -37,36 +36,79 @@ type placementService struct {
}
// NewPlacementService returns an instance of placement service.
-func NewPlacementService(s placement.Storage, opts placement.Options) placement.Service {
+func NewPlacementService(s placement.Storage, opts ...Option) placement.Service {
return &placementService{
Storage: s,
placementServiceImpl: newPlacementServiceImpl(
- opts,
s,
-
+ opts...,
),
}
}
+type options struct {
+ placementAlgorithm placement.Algorithm
+ placementOpts placement.Options
+}
+
+// Option is an interface for PlacementService options.
+type Option interface {
+ apply(*options)
+}
+
+// WithAlgorithm sets the algorithm implementation that will be used by PlacementService.
+func WithAlgorithm(algo placement.Algorithm) Option {
+ return &algorithmOption{placementAlgorithm: algo}
+}
+
+type algorithmOption struct {
+ placementAlgorithm placement.Algorithm
+}
+
+func (a *algorithmOption) apply(opts *options) {
+ opts.placementAlgorithm = a.placementAlgorithm
+}
+
+type placementOptionsOption struct {
+ opts placement.Options
+}
+
+func (a *placementOptionsOption) apply(opts *options) {
+ opts.placementOpts = a.opts
+}
+
+// WithPlacementOptions sets the placement options for PlacementService.
+func WithPlacementOptions(opts placement.Options) Option {
+ return &placementOptionsOption{opts: opts}
+}
+
func newPlacementServiceImpl(
- opts placement.Options,
storage minimalPlacementStorage,
+ opts ...Option,
) *placementServiceImpl {
- if opts == nil {
- opts = placement.NewOptions()
+ o := options{
+ placementOpts: placement.NewOptions(),
+ }
+
+ for _, opt := range opts {
+ opt.apply(&o)
+ }
+
+ if o.placementAlgorithm == nil {
+ o.placementAlgorithm = algo.NewAlgorithm(o.placementOpts)
}
- instanceSelector := opts.InstanceSelector()
+ instanceSelector := o.placementOpts.InstanceSelector()
if instanceSelector == nil {
- instanceSelector = selector.NewInstanceSelector(opts)
+ instanceSelector = selector.NewInstanceSelector(o.placementOpts)
}
return &placementServiceImpl{
store: storage,
- opts: opts,
- algo: algo.NewAlgorithm(opts),
+ opts: o.placementOpts,
+ algo: o.placementAlgorithm,
selector: instanceSelector,
- logger: opts.InstrumentOptions().Logger(),
+ logger: o.placementOpts.InstrumentOptions().Logger(),
}
}
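This is the heart of the placement-service change: the positional `placement.Options` argument becomes variadic functional options, so `NewPlacementService` and `NewPlacementOperator` can grow optional dependencies (here, a pluggable `placement.Algorithm`) without breaking existing callers. A sketch of the new call shapes, grounded in the constructors used by the tests below:

```go
package main

import (
	"github.com/m3db/m3/src/cluster/kv/mem"
	"github.com/m3db/m3/src/cluster/placement"
	"github.com/m3db/m3/src/cluster/placement/algo"
	"github.com/m3db/m3/src/cluster/placement/service"
	"github.com/m3db/m3/src/cluster/placement/storage"
)

func main() {
	opts := placement.NewOptions().SetValidZone("embedded")
	store := storage.NewPlacementStorage(mem.NewStore(), "placement", opts)

	// All defaults: placement.NewOptions() plus the stock algorithm.
	_ = service.NewPlacementService(store)

	// Explicit placement options and a custom algorithm implementation.
	_ = service.NewPlacementService(store,
		service.WithPlacementOptions(opts),
		service.WithAlgorithm(algo.NewAlgorithm(opts)),
	)
}
```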
diff --git a/src/cluster/placement/service/service_test.go b/src/cluster/placement/service/service_test.go
index 5ebe2b49db..344454df28 100644
--- a/src/cluster/placement/service/service_test.go
+++ b/src/cluster/placement/service/service_test.go
@@ -26,15 +26,16 @@ import (
"github.com/m3db/m3/src/cluster/kv/mem"
"github.com/m3db/m3/src/cluster/placement"
+ "github.com/m3db/m3/src/cluster/placement/algo"
"github.com/m3db/m3/src/cluster/placement/storage"
"github.com/m3db/m3/src/cluster/shard"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGoodWorkflow(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
testGoodWorkflow(t, p)
}
@@ -145,7 +146,8 @@ func assertPlacementInstanceEqualExceptShards(
}
func TestNonShardedWorkflow(t *testing.T) {
- ps := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1").SetIsSharded(false))
+ ps := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1").SetIsSharded(false)))
_, err := ps.BuildInitialPlacement([]placement.Instance{
placement.NewEmptyInstance("i1", "r1", "z1", "e1", 1),
@@ -206,7 +208,8 @@ func TestNonShardedWorkflow(t *testing.T) {
}
func TestBadInitialPlacement(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1").SetIsSharded(false))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1").SetIsSharded(false)))
// invalid numShards
_, err := p.BuildInitialPlacement([]placement.Instance{
@@ -229,7 +232,8 @@ func TestBadInitialPlacement(t *testing.T) {
}, 10, 1)
assert.Error(t, err)
- p = NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
// Not enough instances.
_, err = p.BuildInitialPlacement([]placement.Instance{}, 10, 1)
@@ -264,7 +268,8 @@ func TestBadInitialPlacement(t *testing.T) {
}
func TestBadAddReplica(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err := p.BuildInitialPlacement(
[]placement.Instance{placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 1)},
@@ -276,14 +281,16 @@ func TestBadAddReplica(t *testing.T) {
assert.Error(t, err)
// Could not find placement for service.
- p = NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err = p.AddReplica()
assert.Error(t, err)
}
func TestBadAddInstance(t *testing.T) {
ms := newMockStorage()
- p := NewPlacementService(ms, placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(ms,
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err := p.BuildInitialPlacement(
[]placement.Instance{placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 1)},
@@ -298,18 +305,21 @@ func TestBadAddInstance(t *testing.T) {
_, _, err = p.AddInstances([]placement.Instance{placement.NewEmptyInstance("i2", "r2", "z2", "endpoint", 1)})
assert.Error(t, err)
- p = NewPlacementService(ms, placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(ms,
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, _, err = p.AddInstances([]placement.Instance{placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 1)})
assert.Error(t, err)
// could not find placement for service
- p = NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, _, err = p.AddInstances([]placement.Instance{placement.NewEmptyInstance("i2", "r2", "z1", "endpoint", 1)})
assert.Error(t, err)
}
func TestBadRemoveInstance(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err := p.BuildInitialPlacement(
[]placement.Instance{placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 1)},
@@ -325,13 +335,15 @@ func TestBadRemoveInstance(t *testing.T) {
assert.Error(t, err)
// Could not find placement for service.
- p = NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err = p.RemoveInstances([]string{"i1"})
assert.Error(t, err)
}
func TestBadReplaceInstance(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err := p.BuildInitialPlacement([]placement.Instance{
placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 1),
@@ -363,7 +375,8 @@ func TestBadReplaceInstance(t *testing.T) {
assert.Error(t, err)
// Could not find placement for service.
- p = NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p = NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, _, err = p.ReplaceInstances(
[]string{"i1"},
[]placement.Instance{placement.NewEmptyInstance("i2", "r2", "z1", "endpoint", 1)},
@@ -406,7 +419,8 @@ func TestMarkShard(t *testing.T) {
_, err := ms.SetIfNotExist(p)
assert.NoError(t, err)
- ps := NewPlacementService(ms, placement.NewOptions().SetValidZone("z1"))
+ ps := NewPlacementService(ms,
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
_, err = ps.MarkShardsAvailable("i5", 1)
assert.NoError(t, err)
p, err = ms.Placement()
@@ -461,7 +475,7 @@ func TestMarkInstance(t *testing.T) {
_, err := ms.SetIfNotExist(p)
assert.NoError(t, err)
- ps := NewPlacementService(ms, placement.NewOptions().SetValidZone("z1"))
+ ps := NewPlacementService(ms, WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
// instance not exist
_, err = ps.MarkInstanceAvailable("i6")
@@ -553,7 +567,7 @@ func TestFindReplaceInstance(t *testing.T) {
},
}
for _, test := range testCases {
- p := NewPlacementService(nil, test.opts).(*placementService)
+ p := NewPlacementService(nil, WithPlacementOptions(test.opts)).(*placementService)
res, err := p.selector.SelectReplaceInstances(test.input, test.replaceIDs, s)
if test.expectErr {
assert.Error(t, err)
@@ -663,7 +677,7 @@ func TestMirrorWorkflow(t *testing.T) {
ps := NewPlacementService(
newMockStorage(),
- placement.NewOptions().SetValidZone("z1").SetIsMirrored(true),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1").SetIsMirrored(true)),
)
p, err := ps.BuildInitialPlacement(
@@ -758,7 +772,8 @@ func TestMirrorWorkflow(t *testing.T) {
}
func TestManyShards(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1"))
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1")))
i1 := placement.NewEmptyInstance("i1", "r1", "z1", "endpoint", 2)
i2 := placement.NewEmptyInstance("i2", "r2", "z1", "endpoint", 2)
i3 := placement.NewEmptyInstance("i3", "r3", "z1", "endpoint", 2)
@@ -816,7 +831,7 @@ func TestAddMultipleInstances(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- ps := NewPlacementService(newMockStorage(), test.opts)
+ ps := NewPlacementService(newMockStorage(), WithPlacementOptions(test.opts))
_, err := ps.BuildInitialPlacement(test.initialInstances, 4, 2)
require.NoError(t, err)
@@ -897,7 +912,7 @@ func TestReplaceInstances(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- ps := NewPlacementService(newMockStorage(), test.opts)
+ ps := NewPlacementService(newMockStorage(), WithPlacementOptions(test.opts))
_, err := ps.BuildInitialPlacement(test.initialInstances, 4, 2)
require.NoError(t, err)
@@ -913,7 +928,8 @@ func TestReplaceInstances(t *testing.T) {
}
func TestValidateFnBeforeUpdate(t *testing.T) {
- p := NewPlacementService(newMockStorage(), placement.NewOptions().SetValidZone("z1")).(*placementService)
+ p := NewPlacementService(newMockStorage(),
+ WithPlacementOptions(placement.NewOptions().SetValidZone("z1"))).(*placementService)
_, err := p.BuildInitialPlacement(
[]placement.Instance{placement.NewEmptyInstance("i1", "r1", "z1", "endpoint1", 1)},
@@ -927,6 +943,23 @@ func TestValidateFnBeforeUpdate(t *testing.T) {
assert.Equal(t, expectErr, err)
}
+func TestPlacementServiceImplOptions(t *testing.T) {
+ placementOptions := placement.NewOptions().SetValidZone("foozone").SetIsSharded(true)
+ al := algo.NewAlgorithm(placementOptions.SetIsSharded(false))
+
+ defaultImpl := newPlacementServiceImpl(nil)
+ require.NotNil(t, defaultImpl)
+ assert.NotNil(t, defaultImpl.opts)
+ assert.NotNil(t, defaultImpl.algo)
+ assert.NotEqual(t, placementOptions.ValidZone(), defaultImpl.opts.ValidZone())
+
+ customImpl := newPlacementServiceImpl(nil,
+ WithPlacementOptions(placementOptions),
+ WithAlgorithm(al))
+ assert.Equal(t, placementOptions.ValidZone(), customImpl.opts.ValidZone())
+ assert.Equal(t, al, customImpl.algo)
+}
+
func newMockStorage() placement.Storage {
return storage.NewPlacementStorage(mem.NewStore(), "", nil)
}
diff --git a/src/cluster/services/services.go b/src/cluster/services/services.go
index 2bf03263cb..7ef98aec7c 100644
--- a/src/cluster/services/services.go
+++ b/src/cluster/services/services.go
@@ -154,7 +154,7 @@ func (c *client) PlacementService(sid ServiceID, opts placement.Options) (placem
return ps.NewPlacementService(
storage.NewPlacementStorage(store, c.placementKeyFn(sid), opts),
- opts,
+ ps.WithPlacementOptions(opts),
), nil
}
diff --git a/src/cmd/services/m3coordinator/downsample/async_downsampler.go b/src/cmd/services/m3coordinator/downsample/async_downsampler.go
index 9bb2f9f7f3..081c7ee884 100644
--- a/src/cmd/services/m3coordinator/downsample/async_downsampler.go
+++ b/src/cmd/services/m3coordinator/downsample/async_downsampler.go
@@ -92,3 +92,12 @@ func (d *asyncDownsampler) NewMetricsAppender() (MetricsAppender, error) {
}
return d.downsampler.NewMetricsAppender()
}
+
+func (d *asyncDownsampler) Enabled() bool {
+ d.RLock()
+ defer d.RUnlock()
+ if d.err != nil {
+ return false
+ }
+ return d.downsampler.Enabled()
+}
diff --git a/src/cmd/services/m3coordinator/downsample/downsample_mock.go b/src/cmd/services/m3coordinator/downsample/downsample_mock.go
index 2d04b1a4c8..5885b2d61a 100644
--- a/src/cmd/services/m3coordinator/downsample/downsample_mock.go
+++ b/src/cmd/services/m3coordinator/downsample/downsample_mock.go
@@ -54,6 +54,20 @@ func (m *MockDownsampler) EXPECT() *MockDownsamplerMockRecorder {
return m.recorder
}
+// Enabled mocks base method
+func (m *MockDownsampler) Enabled() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Enabled")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Enabled indicates an expected call of Enabled
+func (mr *MockDownsamplerMockRecorder) Enabled() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Enabled", reflect.TypeOf((*MockDownsampler)(nil).Enabled))
+}
+
// NewMetricsAppender mocks base method
func (m *MockDownsampler) NewMetricsAppender() (MetricsAppender, error) {
m.ctrl.T.Helper()
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler.go b/src/cmd/services/m3coordinator/downsample/downsampler.go
index 376d990e69..dd78b2372b 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler.go
@@ -26,6 +26,7 @@ import (
"github.com/m3db/m3/src/metrics/generated/proto/metricpb"
"github.com/m3db/m3/src/query/storage/m3"
+ "github.com/m3db/m3/src/query/storage/m3/storagemetadata"
"github.com/m3db/m3/src/query/ts"
"go.uber.org/zap"
@@ -35,6 +36,10 @@ import (
// Downsampler is a downsampler.
type Downsampler interface {
NewMetricsAppender() (MetricsAppender, error)
+	// Enabled indicates whether the downsampler is enabled or not. A
+	// downsampler is enabled only when aggregated ClusterNamespaces
+	// exist, since downsampling applies only to aggregations.
+ Enabled() bool
}
// MetricsAppender is a metrics appender that can build a samples
@@ -88,6 +93,7 @@ type downsampler struct {
sync.RWMutex
metricsAppenderOpts metricsAppenderOptions
+ enabled bool
}
type downsamplerOptions struct {
@@ -145,6 +151,13 @@ func (d *downsampler) NewMetricsAppender() (MetricsAppender, error) {
return metricsAppender, nil
}
+func (d *downsampler) Enabled() bool {
+ d.RLock()
+ defer d.RUnlock()
+
+ return d.enabled
+}
+
func (d *downsampler) OnUpdate(namespaces m3.ClusterNamespaces) {
logger := d.opts.InstrumentOptions.Logger()
@@ -153,6 +166,15 @@ func (d *downsampler) OnUpdate(namespaces m3.ClusterNamespaces) {
return
}
+ var hasAggregatedNamespaces bool
+ for _, namespace := range namespaces {
+ attrs := namespace.Options().Attributes()
+ if attrs.MetricsType == storagemetadata.AggregatedMetricsType {
+ hasAggregatedNamespaces = true
+ break
+ }
+ }
+
autoMappingRules, err := NewAutoMappingRules(namespaces)
if err != nil {
logger.Error("could not generate automapping rules for aggregated namespaces."+
@@ -180,5 +202,7 @@ func (d *downsampler) OnUpdate(namespaces m3.ClusterNamespaces) {
d.Lock()
d.metricsAppenderOpts.defaultStagedMetadatasProtos = defaultStagedMetadatasProtos
+ // Can only downsample when aggregated namespaces are available.
+ d.enabled = hasAggregatedNamespaces
d.Unlock()
}
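`Enabled` is driven entirely by the namespace watcher: on every update the downsampler scans the cluster namespaces and flips `enabled` only if at least one stores aggregated metrics, since downsampled output has nowhere to land otherwise. A stripped-down sketch of that scan, with a stand-in namespace type:

```go
package main

import "fmt"

// namespace stands in for m3.ClusterNamespace; only the aggregated
// attribute matters for the enabled decision.
type namespace struct{ aggregated bool }

func hasAggregated(namespaces []namespace) bool {
	for _, ns := range namespaces {
		if ns.aggregated {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasAggregated([]namespace{{aggregated: false}}))                      // false: downsampler disabled
	fmt.Println(hasAggregated([]namespace{{aggregated: false}, {aggregated: true}})) // true: downsampler enabled
}
```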
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler_test.go b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
index 38cf4a3cc9..bc850de531 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler_test.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
@@ -102,6 +102,8 @@ func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *test
},
})
+ require.False(t, testDownsampler.downsampler.Enabled())
+
origStagedMetadata := originalStagedMetadata(t, testDownsampler)
session := dbclient.NewMockSession(ctrl)
@@ -114,10 +116,47 @@ func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *test
waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
+ require.True(t, testDownsampler.downsampler.Enabled())
+
// Test expected output
testDownsamplerAggregation(t, testDownsampler)
}
+func TestDownsamplerAggregationToggleEnabled(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{})
+
+ require.False(t, testDownsampler.downsampler.Enabled())
+
+ // Add an aggregated namespace and expect downsampler to be enabled.
+ session := dbclient.NewMockSession(ctrl)
+ setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("2s:1d"),
+ Resolution: 2 * time.Second,
+ Retention: 24 * time.Hour,
+ Session: session,
+ })
+ waitForEnabledUpdate(t, &testDownsampler, false)
+
+ require.True(t, testDownsampler.downsampler.Enabled())
+
+ // Set just an unaggregated namespace and expect downsampler to be disabled.
+ clusters, err := m3.NewClusters(m3.UnaggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("default"),
+ Retention: 48 * time.Hour,
+ Session: session,
+ })
+ require.NoError(t, err)
+ require.NoError(t,
+ testDownsampler.opts.ClusterNamespacesWatcher.Update(clusters.ClusterNamespaces()))
+
+ waitForEnabledUpdate(t, &testDownsampler, true)
+
+ require.False(t, testDownsampler.downsampler.Enabled())
+}
+
func TestDownsamplerAggregationWithRulesStore(t *testing.T) {
testDownsampler := newTestDownsampler(t, testDownsamplerOptions{})
rulesStore := testDownsampler.rulesStore
@@ -1253,7 +1292,18 @@ func waitForStagedMetadataUpdate(t *testing.T, testDownsampler testDownsampler,
return !assert.ObjectsAreEqual(origStagedMetadata, ds.metricsAppenderOpts.defaultStagedMetadatasProtos)
}, time.Second))
+}
+func waitForEnabledUpdate(t *testing.T, testDownsampler *testDownsampler, current bool) {
+ ds, ok := testDownsampler.downsampler.(*downsampler)
+ require.True(t, ok)
+
+ require.True(t, clock.WaitUntil(func() bool {
+ ds.RLock()
+ defer ds.RUnlock()
+
+ return current != ds.enabled
+ }, time.Second))
}
type testExpectedWrite struct {
diff --git a/src/cmd/services/m3coordinator/downsample/options.go b/src/cmd/services/m3coordinator/downsample/options.go
index 2089552c01..b1b61df731 100644
--- a/src/cmd/services/m3coordinator/downsample/options.go
+++ b/src/cmd/services/m3coordinator/downsample/options.go
@@ -1052,7 +1052,7 @@ func (o DownsamplerOptions) newAggregatorPlacementManager(
placementSvc := placementservice.NewPlacementService(
placementstorage.NewPlacementStorage(localKVStore, placementKVKey, placementOpts),
- placementOpts)
+ placementservice.WithPlacementOptions(placementOpts))
_, err := placementSvc.BuildInitialPlacement([]placement.Instance{instance}, numShards,
replicationFactor)
diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go
index 199f3325a1..27cd4b477a 100644
--- a/src/cmd/services/m3coordinator/ingest/write.go
+++ b/src/cmd/services/m3coordinator/ingest/write.go
@@ -200,17 +200,16 @@ func (d *downsamplerAndWriter) shouldDownsample(
overrides WriteOptions,
) bool {
var (
- downsamplerExists = d.downsampler != nil
// If they didn't request the mapping rules to be overridden, then assume they want the default
// ones.
useDefaultMappingRules = !overrides.DownsampleOverride
// If they did try and override the mapping rules, make sure they've provided at least one.
_, downsampleOverride = d.downsampleOverrideRules(overrides)
)
- // Only downsample if the downsampler exists, and they either want to use the default mapping
+ // Only downsample if the downsampler is enabled, and they either want to use the default mapping
// rules, or they're trying to override the mapping rules and they've provided at least one
// override to do so.
- return downsamplerExists && (useDefaultMappingRules || downsampleOverride)
+ return d.downsampler.Enabled() && (useDefaultMappingRules || downsampleOverride)
}
func (d *downsamplerAndWriter) downsampleOverrideRules(
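The predicate itself now keys off `Enabled()` instead of a nil check, which also covers the case where a downsampler exists but has no aggregated namespaces to write to. A condensed, self-contained restatement of the decision logic (parameter names are illustrative):

```go
package main

import "fmt"

// shouldDownsample: downsample only when the downsampler reports
// Enabled() and the caller either accepts the default mapping rules or
// supplied at least one override rule.
func shouldDownsample(enabled, downsampleOverride, hasOverrideRules bool) bool {
	useDefaultMappingRules := !downsampleOverride
	return enabled && (useDefaultMappingRules || hasOverrideRules)
}

func main() {
	fmt.Println(shouldDownsample(false, false, false)) // false: disabled wins
	fmt.Println(shouldDownsample(true, false, false))  // true: default rules
	fmt.Println(shouldDownsample(true, true, false))   // false: override with no rules
	fmt.Println(shouldDownsample(true, true, true))    // true
}
```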
diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go
index 243d8414dc..ae798c6825 100644
--- a/src/cmd/services/m3coordinator/ingest/write_test.go
+++ b/src/cmd/services/m3coordinator/ingest/write_test.go
@@ -437,9 +437,8 @@ func TestDownsampleAndWriteNoDownsampler(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl,
+ downAndWrite, _, session := newTestDownsamplerAndWriterWithEnabled(t, ctrl, false,
testDownsamplerAndWriterOptions{})
- downAndWrite.downsampler = nil
expectDefaultStorageWrites(session, testDatapoints1, testAnnotation1)
@@ -653,9 +652,8 @@ func TestDownsampleAndWriteBatchNoDownsampler(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
- downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl,
+ downAndWrite, _, session := newTestDownsamplerAndWriterWithEnabled(t, ctrl, false,
testDownsamplerAndWriterOptions{})
- downAndWrite.downsampler = nil
for _, entry := range testEntries {
for _, dp := range entry.datapoints {
@@ -811,6 +809,15 @@ func newTestDownsamplerAndWriter(
t *testing.T,
ctrl *gomock.Controller,
opts testDownsamplerAndWriterOptions,
+) (*downsamplerAndWriter, *downsample.MockDownsampler, *client.MockSession) {
+ return newTestDownsamplerAndWriterWithEnabled(t, ctrl, true, opts)
+}
+
+func newTestDownsamplerAndWriterWithEnabled(
+ t *testing.T,
+ ctrl *gomock.Controller,
+ enabled bool,
+ opts testDownsamplerAndWriterOptions,
) (*downsamplerAndWriter, *downsample.MockDownsampler, *client.MockSession) {
var (
storage storage.Storage
@@ -822,6 +829,7 @@ func newTestDownsamplerAndWriter(
storage, session = testm3.NewStorageAndSession(t, ctrl)
}
downsampler := downsample.NewMockDownsampler(ctrl)
+ downsampler.EXPECT().Enabled().Return(enabled)
return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool, instrument.NewOptions()).(*downsamplerAndWriter), downsampler, session
}
@@ -833,6 +841,7 @@ func newTestDownsamplerAndWriterWithAggregatedNamespace(
storage, session := testm3.NewStorageAndSessionWithAggregatedNamespaces(
t, ctrl, aggregatedNamespaces)
downsampler := downsample.NewMockDownsampler(ctrl)
+ downsampler.EXPECT().Enabled().Return(true)
return NewDownsamplerAndWriter(storage, downsampler, testWorkerPool, instrument.NewOptions()).(*downsamplerAndWriter), downsampler, session
}
diff --git a/src/cmd/services/m3dbnode/config/bootstrap.go b/src/cmd/services/m3dbnode/config/bootstrap.go
index 108b9dc824..09d618b82c 100644
--- a/src/cmd/services/m3dbnode/config/bootstrap.go
+++ b/src/cmd/services/m3dbnode/config/bootstrap.go
@@ -153,6 +153,34 @@ type BootstrapConfiguration struct {
// IndexSegmentConcurrency determines the concurrency for building index
// segments.
IndexSegmentConcurrency *int `yaml:"indexSegmentConcurrency"`
+
+ // Verify specifies verification checks.
+ Verify *BootstrapVerifyConfiguration `yaml:"verify"`
+}
+
+// VerifyOrDefault returns verify configuration or default.
+func (bsc BootstrapConfiguration) VerifyOrDefault() BootstrapVerifyConfiguration {
+ if bsc.Verify == nil {
+ return BootstrapVerifyConfiguration{}
+ }
+
+ return *bsc.Verify
+}
+
+// BootstrapVerifyConfiguration outlines verification checks to enable
+// during a bootstrap.
+type BootstrapVerifyConfiguration struct {
+ VerifyIndexSegments *bool `yaml:"verifyIndexSegments"`
+}
+
+// VerifyIndexSegmentsOrDefault returns whether to verify index segments,
+// defaulting to false when unset.
+func (c BootstrapVerifyConfiguration) VerifyIndexSegmentsOrDefault() bool {
+ if c.VerifyIndexSegments == nil {
+ return false
+ }
+
+ return *c.VerifyIndexSegments
}
// BootstrapFilesystemConfiguration specifies config for the fs bootstrapper.
@@ -288,11 +316,13 @@ func (bsc BootstrapConfiguration) New(
SetFilesystemOptions(fsOpts).
SetIndexOptions(opts.IndexOptions()).
SetPersistManager(opts.PersistManager()).
+ SetIndexClaimsManager(opts.IndexClaimsManager()).
SetCompactor(compactor).
SetRuntimeOptionsManager(opts.RuntimeOptionsManager()).
SetIdentifierPool(opts.IdentifierPool()).
SetMigrationOptions(fsCfg.migration().NewOptions()).
- SetStorageOptions(opts)
+ SetStorageOptions(opts).
+ SetIndexSegmentsVerify(bsc.VerifyOrDefault().VerifyIndexSegmentsOrDefault())
if v := bsc.IndexSegmentConcurrency; v != nil {
fsbOpts = fsbOpts.SetIndexSegmentConcurrency(*v)
}
@@ -329,6 +359,7 @@ func (bsc BootstrapConfiguration) New(
SetIndexOptions(opts.IndexOptions()).
SetAdminClient(adminClient).
SetPersistManager(opts.PersistManager()).
+ SetIndexClaimsManager(opts.IndexClaimsManager()).
SetCompactor(compactor).
SetRuntimeOptionsManager(opts.RuntimeOptionsManager()).
SetContextPool(opts.ContextPool()).
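The new `Verify` block follows m3's usual optional-config pattern: pointer fields distinguish "absent" from "explicitly false", and the `...OrDefault` accessors collapse nil to a safe default (verification off). A minimal restatement of the pattern:

```go
package main

import "fmt"

// verifyConfig re-states BootstrapVerifyConfiguration's shape; the
// pointer field makes an omitted YAML key distinguishable from false.
type verifyConfig struct {
	VerifyIndexSegments *bool `yaml:"verifyIndexSegments"`
}

func (c *verifyConfig) verifyIndexSegmentsOrDefault() bool {
	if c == nil || c.VerifyIndexSegments == nil {
		return false // default: no verification
	}
	return *c.VerifyIndexSegments
}

func main() {
	var absent *verifyConfig
	fmt.Println(absent.verifyIndexSegmentsOrDefault()) // false

	yes := true
	set := &verifyConfig{VerifyIndexSegments: &yes}
	fmt.Println(set.verifyIndexSegmentsOrDefault()) // true
}
```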
diff --git a/src/cmd/services/m3dbnode/config/config.go b/src/cmd/services/m3dbnode/config/config.go
index e0ebed20da..1ec7e0a165 100644
--- a/src/cmd/services/m3dbnode/config/config.go
+++ b/src/cmd/services/m3dbnode/config/config.go
@@ -31,6 +31,7 @@ import (
coordinatorcfg "github.com/m3db/m3/src/cmd/services/m3query/config"
"github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/dbnode/discovery"
"github.com/m3db/m3/src/dbnode/environment"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/x/config/hostid"
@@ -51,6 +52,38 @@ const (
defaultEtcdServerPort = 2380
)
+var (
+ defaultLogging = xlog.Configuration{
+ Level: "info",
+ }
+ defaultMetricsSanitization = instrument.PrometheusMetricSanitization
+ defaultMetricsExtendedMetricsType = instrument.DetailedExtendedMetrics
+ defaultMetrics = instrument.MetricsConfiguration{
+ PrometheusReporter: &instrument.PrometheusConfiguration{
+ HandlerPath: "/metrics",
+ },
+ Sanitization: &defaultMetricsSanitization,
+ SamplingRate: 1.0,
+ ExtendedMetrics: &defaultMetricsExtendedMetricsType,
+ }
+ defaultListenAddress = "0.0.0.0:9000"
+ defaultClusterListenAddress = "0.0.0.0:9001"
+ defaultHTTPNodeListenAddress = "0.0.0.0:9002"
+ defaultHTTPClusterListenAddress = "0.0.0.0:9003"
+ defaultDebugListenAddress = "0.0.0.0:9004"
+ defaultGCPercentage = 100
+ defaultWriteNewSeriesAsync = true
+ defaultWriteNewSeriesBackoffDuration = 2 * time.Millisecond
+ defaultCommitLogPolicy = CommitLogPolicy{
+ FlushMaxBytes: 524288,
+ FlushEvery: time.Second * 1,
+ Queue: CommitLogQueuePolicy{
+ Size: 2097152,
+ CalculationType: CalculationTypeFixed,
+ },
+ }
+)
+
// Configuration is the top level configuration that includes both a DB
// node and a coordinator.
type Configuration struct {
@@ -61,10 +94,10 @@ type Configuration struct {
Coordinator *coordinatorcfg.Configuration `yaml:"coordinator"`
}
-// InitDefaultsAndValidate initializes all default values and validates the Configuration.
-// We use this method to validate fields where the validator package falls short.
-func (c *Configuration) InitDefaultsAndValidate() error {
- return c.DB.InitDefaultsAndValidate()
+// Validate validates the Configuration. We use this method to validate fields
+// where the validator package falls short.
+func (c *Configuration) Validate() error {
+ return c.DB.Validate()
}
// DBConfiguration is the configuration for a DB node.
@@ -76,25 +109,25 @@ type DBConfiguration struct {
Transforms TransformConfiguration `yaml:"transforms"`
// Logging configuration.
- Logging xlog.Configuration `yaml:"logging"`
+ Logging *xlog.Configuration `yaml:"logging"`
// Metrics configuration.
- Metrics instrument.MetricsConfiguration `yaml:"metrics"`
+ Metrics *instrument.MetricsConfiguration `yaml:"metrics"`
// The host and port on which to listen for the node service.
- ListenAddress string `yaml:"listenAddress" validate:"nonzero"`
+ ListenAddress *string `yaml:"listenAddress"`
// The host and port on which to listen for the cluster service.
- ClusterListenAddress string `yaml:"clusterListenAddress" validate:"nonzero"`
+ ClusterListenAddress *string `yaml:"clusterListenAddress"`
// The HTTP host and port on which to listen for the node service.
- HTTPNodeListenAddress string `yaml:"httpNodeListenAddress" validate:"nonzero"`
+ HTTPNodeListenAddress *string `yaml:"httpNodeListenAddress"`
// The HTTP host and port on which to listen for the cluster service.
- HTTPClusterListenAddress string `yaml:"httpClusterListenAddress" validate:"nonzero"`
+ HTTPClusterListenAddress *string `yaml:"httpClusterListenAddress"`
// The host and port on which to listen for debug endpoints.
- DebugListenAddress string `yaml:"debugListenAddress"`
+ DebugListenAddress *string `yaml:"debugListenAddress"`
// HostID is the local host ID configuration.
HostID hostid.Configuration `yaml:"hostID"`
@@ -105,14 +138,6 @@ type DBConfiguration struct {
// The initial garbage collection target percentage.
GCPercentage int `yaml:"gcPercentage" validate:"max=100"`
- // TODO(V1): Move to `limits`.
- // Write new series limit per second to limit overwhelming during new ID bursts.
- WriteNewSeriesLimitPerSecond int `yaml:"writeNewSeriesLimitPerSecond"`
-
- // TODO(V1): Move to `limits`.
- // Write new series backoff between batches of new series insertions.
- WriteNewSeriesBackoffDuration time.Duration `yaml:"writeNewSeriesBackoffDuration"`
-
// The tick configuration, omit this to use default settings.
Tick *TickConfiguration `yaml:"tick"`
@@ -129,7 +154,7 @@ type DBConfiguration struct {
Filesystem FilesystemConfiguration `yaml:"filesystem"`
// The commit log policy for the node.
- CommitLog CommitLogPolicy `yaml:"commitlog"`
+ CommitLog *CommitLogPolicy `yaml:"commitlog"`
// The repair policy for repairing data within a cluster.
Repair *RepairPolicy `yaml:"repair"`
@@ -138,16 +163,19 @@ type DBConfiguration struct {
Replication *ReplicationPolicy `yaml:"replication"`
// The pooling policy.
- PoolingPolicy PoolingPolicy `yaml:"pooling"`
+ PoolingPolicy *PoolingPolicy `yaml:"pooling"`
- // The environment (static or dynamic) configuration.
- EnvironmentConfig environment.Configuration `yaml:"config"`
+ // The discovery configuration.
+ DiscoveryConfig discovery.Configuration `yaml:"discovery"`
// The configuration for hashing
Hashing HashingConfiguration `yaml:"hashing"`
// Write new series asynchronously for fast ingestion of new ID bursts.
- WriteNewSeriesAsync bool `yaml:"writeNewSeriesAsync"`
+ WriteNewSeriesAsync *bool `yaml:"writeNewSeriesAsync"`
+
+ // Write new series backoff between batches of new series insertions.
+ WriteNewSeriesBackoffDuration *time.Duration `yaml:"writeNewSeriesBackoffDuration"`
// Proto contains the configuration specific to running in the ProtoDataMode.
Proto *ProtoConfiguration `yaml:"proto"`
@@ -171,14 +199,127 @@ type DBConfiguration struct {
Debug config.DebugConfiguration `yaml:"debug"`
}
-// InitDefaultsAndValidate initializes all default values and validates the Configuration.
-// We use this method to validate fields where the validator package falls short.
-func (c *DBConfiguration) InitDefaultsAndValidate() error {
+// LoggingOrDefault returns the logging configuration or defaults.
+func (c *DBConfiguration) LoggingOrDefault() xlog.Configuration {
+ if c.Logging == nil {
+ return defaultLogging
+ }
+
+ return *c.Logging
+}
+
+// MetricsOrDefault returns metrics configuration or defaults.
+func (c *DBConfiguration) MetricsOrDefault() *instrument.MetricsConfiguration {
+ if c.Metrics == nil {
+ return &defaultMetrics
+ }
+
+ return c.Metrics
+}
+
+// ListenAddressOrDefault returns the listen address or default.
+func (c *DBConfiguration) ListenAddressOrDefault() string {
+ if c.ListenAddress == nil {
+ return defaultListenAddress
+ }
+
+ return *c.ListenAddress
+}
+
+// ClusterListenAddressOrDefault returns the cluster listen address or default.
+func (c *DBConfiguration) ClusterListenAddressOrDefault() string {
+ if c.ClusterListenAddress == nil {
+ return defaultClusterListenAddress
+ }
+
+ return *c.ClusterListenAddress
+}
+
+// HTTPNodeListenAddressOrDefault returns the HTTP node listen address or default.
+func (c *DBConfiguration) HTTPNodeListenAddressOrDefault() string {
+ if c.HTTPNodeListenAddress == nil {
+ return defaultHTTPNodeListenAddress
+ }
+
+ return *c.HTTPNodeListenAddress
+}
+
+// HTTPClusterListenAddressOrDefault returns the HTTP cluster listen address or default.
+func (c *DBConfiguration) HTTPClusterListenAddressOrDefault() string {
+ if c.HTTPClusterListenAddress == nil {
+ return defaultHTTPClusterListenAddress
+ }
+
+ return *c.HTTPClusterListenAddress
+}
+
+// DebugListenAddressOrDefault returns the debug listen address or default.
+func (c *DBConfiguration) DebugListenAddressOrDefault() string {
+ if c.DebugListenAddress == nil {
+ return defaultDebugListenAddress
+ }
+
+ return *c.DebugListenAddress
+}
+
+// CommitLogOrDefault returns the commit log policy or default.
+func (c *DBConfiguration) CommitLogOrDefault() CommitLogPolicy {
+ if c.CommitLog == nil {
+ return defaultCommitLogPolicy
+ }
+
+ return *c.CommitLog
+}
+
+// GCPercentageOrDefault returns the GC percentage or default.
+func (c *DBConfiguration) GCPercentageOrDefault() int {
+ if c.GCPercentage == 0 {
+ return defaultGCPercentage
+ }
+
+ return c.GCPercentage
+}
+
+// WriteNewSeriesAsyncOrDefault returns whether to write new series asynchronously.
+func (c *DBConfiguration) WriteNewSeriesAsyncOrDefault() bool {
+ if c.WriteNewSeriesAsync == nil {
+ return defaultWriteNewSeriesAsync
+ }
+
+ return *c.WriteNewSeriesAsync
+}
+
+// WriteNewSeriesBackoffDurationOrDefault returns the backoff duration for new series inserts.
+func (c *DBConfiguration) WriteNewSeriesBackoffDurationOrDefault() time.Duration {
+ if c.WriteNewSeriesBackoffDuration == nil {
+ return defaultWriteNewSeriesBackoffDuration
+ }
+
+ return *c.WriteNewSeriesBackoffDuration
+}
+
+// PoolingPolicyOrDefault returns the pooling policy or default.
+func (c *DBConfiguration) PoolingPolicyOrDefault() (PoolingPolicy, error) {
+ var policy PoolingPolicy
+ if c.PoolingPolicy != nil {
+ policy = *c.PoolingPolicy
+ }
+
+ if err := policy.InitDefaultsAndValidate(); err != nil {
+ return PoolingPolicy{}, err
+ }
+
+ return policy, nil
+}
+
+// Validate validates the Configuration. We use this method to validate fields
+// where the validator package falls short.
+func (c *DBConfiguration) Validate() error {
if err := c.Filesystem.Validate(); err != nil {
return err
}
- if err := c.PoolingPolicy.InitDefaultsAndValidate(); err != nil {
+ if _, err := c.PoolingPolicyOrDefault(); err != nil {
return err
}
@@ -447,12 +588,18 @@ func (c *ProtoConfiguration) Validate() error {
// NewEtcdEmbedConfig creates a new embedded etcd config from kv config.
func NewEtcdEmbedConfig(cfg DBConfiguration) (*embed.Config, error) {
newKVCfg := embed.NewConfig()
- kvCfg := cfg.EnvironmentConfig.SeedNodes
hostID, err := cfg.HostID.Resolve()
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed resolving hostID %w", err)
}
+
+ envCfg, err := cfg.DiscoveryConfig.EnvironmentConfig(hostID)
+ if err != nil {
+ return nil, fmt.Errorf("failed getting env config from discovery config %w", err)
+ }
+
+ kvCfg := envCfg.SeedNodes
newKVCfg.Name = hostID
dir := kvCfg.RootDir
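
Every field change in this file follows the same recipe: a formerly required scalar becomes a pointer, its validate tag goes away, and a matching *OrDefault accessor supplies the value that used to be mandatory YAML. A sketch against the defaults declared at the top of the file:

    var cfg DBConfiguration // zero value: all optional fields nil

    fmt.Println(cfg.ListenAddressOrDefault())                  // 0.0.0.0:9000
    fmt.Println(cfg.ClusterListenAddressOrDefault())           // 0.0.0.0:9001
    fmt.Println(cfg.GCPercentageOrDefault())                   // 100
    fmt.Println(cfg.WriteNewSeriesAsyncOrDefault())            // true
    fmt.Println(cfg.WriteNewSeriesBackoffDurationOrDefault())  // 2ms

One caveat of this style: GCPercentageOrDefault cannot distinguish an explicit 0 from an unset field, since GCPercentage stayed a plain int rather than becoming a pointer.
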
diff --git a/src/cmd/services/m3dbnode/config/config_test.go b/src/cmd/services/m3dbnode/config/config_test.go
index 10c82b710b..c41a099429 100644
--- a/src/cmd/services/m3dbnode/config/config_test.go
+++ b/src/cmd/services/m3dbnode/config/config_test.go
@@ -95,9 +95,6 @@ db:
gcPercentage: 100
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
bootstrap:
filesystem:
numProcessorsPerCPU: 0.42
@@ -277,41 +274,42 @@ db:
lowWatermark: 0.01
highWatermark: 0.02
- config:
- service:
- env: production
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - 1.1.1.1:2379
- - 1.1.1.2:2379
- - 1.1.1.3:2379
-
- seedNodes:
- listenPeerUrls:
- - http://0.0.0.0:2380
- listenClientUrls:
- - http://0.0.0.0:2379
- rootDir: /var/lib/etcd
- initialAdvertisePeerUrls:
- - http://1.1.1.1:2380
- advertiseClientUrls:
- - http://1.1.1.1:2379
- initialCluster:
- - hostID: host1
- endpoint: http://1.1.1.1:2380
- clusterState: existing
- - hostID: host2
- endpoint: http://1.1.1.2:2380
- - hostID: host3
- endpoint: http://1.1.1.3:2380
+ discovery:
+ config:
+ service:
+ env: production
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - 1.1.1.1:2379
+ - 1.1.1.2:2379
+ - 1.1.1.3:2379
+
+ seedNodes:
+ listenPeerUrls:
+ - http://0.0.0.0:2380
+ listenClientUrls:
+ - http://0.0.0.0:2379
+ rootDir: /var/lib/etcd
+ initialAdvertisePeerUrls:
+ - http://1.1.1.1:2380
+ advertiseClientUrls:
+ - http://1.1.1.1:2379
+ initialCluster:
+ - hostID: host1
+ endpoint: http://1.1.1.1:2380
+ clusterState: existing
+ - hostID: host2
+ endpoint: http://1.1.1.2:2380
+ - hostID: host3
+ endpoint: http://1.1.1.3:2380
hashing:
seed: 42
writeNewSeriesAsync: true
-
+ writeNewSeriesBackoffDuration: 2ms
tracing:
backend: jaeger
`
@@ -409,8 +407,6 @@ func TestConfiguration(t *testing.T) {
writeShardsInitializing: null
shardsLeavingCountTowardsConsistency: null
gcPercentage: 100
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
tick: null
bootstrap:
mode: null
@@ -422,6 +418,7 @@ func TestConfiguration(t *testing.T) {
peers: null
cacheSeriesMetadata: null
indexSegmentConcurrency: null
+ verify: null
blockRetrieve: null
cache:
series: null
@@ -607,68 +604,73 @@ func TestConfiguration(t *testing.T) {
size: 8
lowWatermark: 0
highWatermark: 0
- config:
- services:
- - async: false
- clientOverrides:
- hostQueueFlushInterval: null
- targetHostQueueFlushSize: null
- service:
- zone: embedded
- env: production
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - 1.1.1.1:2379
- - 1.1.1.2:2379
- - 1.1.1.3:2379
- keepAlive: null
- tls: null
- autoSyncInterval: 0s
- m3sd:
- initTimeout: null
- watchWithRevision: 0
- newDirectoryMode: null
- statics: []
- seedNodes:
- rootDir: /var/lib/etcd
- initialAdvertisePeerUrls:
- - http://1.1.1.1:2380
- advertiseClientUrls:
- - http://1.1.1.1:2379
- listenPeerUrls:
- - http://0.0.0.0:2380
- listenClientUrls:
- - http://0.0.0.0:2379
- initialCluster:
- - hostID: host1
- endpoint: http://1.1.1.1:2380
- clusterState: existing
- - hostID: host2
- endpoint: http://1.1.1.2:2380
- clusterState: ""
- - hostID: host3
- endpoint: http://1.1.1.3:2380
- clusterState: ""
- clientTransportSecurity:
- caFile: ""
- certFile: ""
- keyFile: ""
- trustedCaFile: ""
- clientCertAuth: false
- autoTls: false
- peerTransportSecurity:
- caFile: ""
- certFile: ""
- keyFile: ""
- trustedCaFile: ""
- clientCertAuth: false
- autoTls: false
+ discovery:
+ type: null
+ m3dbCluster: null
+ m3AggregatorCluster: null
+ config:
+ services:
+ - async: false
+ clientOverrides:
+ hostQueueFlushInterval: null
+ targetHostQueueFlushSize: null
+ service:
+ zone: embedded
+ env: production
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - 1.1.1.1:2379
+ - 1.1.1.2:2379
+ - 1.1.1.3:2379
+ keepAlive: null
+ tls: null
+ autoSyncInterval: 0s
+ m3sd:
+ initTimeout: null
+ watchWithRevision: 0
+ newDirectoryMode: null
+ statics: []
+ seedNodes:
+ rootDir: /var/lib/etcd
+ initialAdvertisePeerUrls:
+ - http://1.1.1.1:2380
+ advertiseClientUrls:
+ - http://1.1.1.1:2379
+ listenPeerUrls:
+ - http://0.0.0.0:2380
+ listenClientUrls:
+ - http://0.0.0.0:2379
+ initialCluster:
+ - hostID: host1
+ endpoint: http://1.1.1.1:2380
+ clusterState: existing
+ - hostID: host2
+ endpoint: http://1.1.1.2:2380
+ clusterState: ""
+ - hostID: host3
+ endpoint: http://1.1.1.3:2380
+ clusterState: ""
+ clientTransportSecurity:
+ caFile: ""
+ certFile: ""
+ keyFile: ""
+ trustedCaFile: ""
+ clientCertAuth: false
+ autoTls: false
+ peerTransportSecurity:
+ caFile: ""
+ certFile: ""
+ keyFile: ""
+ trustedCaFile: ""
+ clientCertAuth: false
+ autoTls: false
hashing:
seed: 42
writeNewSeriesAsync: true
+ writeNewSeriesBackoffDuration: 2ms
proto: null
tracing:
serviceName: ""
@@ -719,6 +721,7 @@ func TestConfiguration(t *testing.T) {
maxOutstandingReadRequests: 0
maxOutstandingRepairedBytes: 0
maxEncodersPerBlock: 0
+ writeNewSeriesPerSecond: 0
wide: null
tchannel: null
debug:
diff --git a/src/cmd/services/m3dbnode/config/limits.go b/src/cmd/services/m3dbnode/config/limits.go
index 039d08f690..29c6dafe78 100644
--- a/src/cmd/services/m3dbnode/config/limits.go
+++ b/src/cmd/services/m3dbnode/config/limits.go
@@ -58,6 +58,9 @@ type LimitsConfiguration struct {
// load on the CPU, which can prevent other DB operations.
// A setting of 0 means there is no maximum.
MaxEncodersPerBlock int `yaml:"maxEncodersPerBlock" validate:"min=0"`
+
+ // Write new series limit per second to limit overwhelming during new ID bursts.
+ WriteNewSeriesPerSecond int `yaml:"writeNewSeriesPerSecond" validate:"min=0"`
}
// MaxRecentQueryResourceLimitConfiguration sets an upper limit on resources consumed by all queries
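
This is the destination of the TODO(V1) fields removed from DBConfiguration earlier in this diff: the per-second cap on new series is now a proper limit. In YAML the key moves under limits (the value shown matches the one deleted from the fixtures):

    db:
      limits:
        writeNewSeriesPerSecond: 1048576
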
diff --git a/src/cmd/services/m3dbnode/main/main.go b/src/cmd/services/m3dbnode/main/main.go
index 9112a6d8cd..81d006d4b5 100644
--- a/src/cmd/services/m3dbnode/main/main.go
+++ b/src/cmd/services/m3dbnode/main/main.go
@@ -56,7 +56,7 @@ func main() {
os.Exit(1)
}
- if err := cfg.InitDefaultsAndValidate(); err != nil {
+ if err := cfg.Validate(); err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
fmt.Fprintf(os.Stderr, "erro validating config: %v\n", err)
diff --git a/src/cmd/services/m3dbnode/main/main_index_test.go b/src/cmd/services/m3dbnode/main/main_index_test.go
index a4cba46dbf..bd302eba66 100644
--- a/src/cmd/services/m3dbnode/main/main_index_test.go
+++ b/src/cmd/services/m3dbnode/main/main_index_test.go
@@ -111,7 +111,10 @@ func TestIndexEnabledServer(t *testing.T) {
err = xconfig.LoadFile(&cfg, configFd.Name(), xconfig.Options{})
require.NoError(t, err)
- syncCluster, err := cfg.DB.EnvironmentConfig.Services.SyncCluster()
+ envCfg, err := cfg.DB.DiscoveryConfig.EnvironmentConfig(hostID)
+ require.NoError(t, err)
+
+ syncCluster, err := envCfg.Services.SyncCluster()
require.NoError(t, err)
configSvcClient, err := syncCluster.Service.NewClient(instrument.NewOptions().
SetLogger(zap.NewNop()))
@@ -193,7 +196,7 @@ func TestIndexEnabledServer(t *testing.T) {
// NB(r): Make sure client config points to the root config
// service since we're going to instantiate the client configuration
// just by itself.
- cfg.DB.Client.EnvironmentConfig = &cfg.DB.EnvironmentConfig
+ cfg.DB.Client.EnvironmentConfig = &envCfg
cli, err := cfg.DB.Client.NewClient(client.ConfigurationParameters{})
require.NoError(t, err)
@@ -347,7 +350,6 @@ db:
gcPercentage: 100
writeNewSeriesAsync: false
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
commitlog:
@@ -448,13 +450,14 @@ db:
- capacity: 4096
size: 128
- config:
- service:
- env: {{.ServiceEnv}}
- zone: {{.ServiceZone}}
- service: {{.ServiceName}}
- cacheDir: {{.ConfigServiceCacheDir}}
- etcdClusters:
- - zone: {{.ServiceZone}}
- endpoints: {{.EtcdEndpoints}}
+ discovery:
+ config:
+ service:
+ env: {{.ServiceEnv}}
+ zone: {{.ServiceZone}}
+ service: {{.ServiceName}}
+ cacheDir: {{.ConfigServiceCacheDir}}
+ etcdClusters:
+ - zone: {{.ServiceZone}}
+ endpoints: {{.EtcdEndpoints}}
`
diff --git a/src/cmd/services/m3dbnode/main/main_test.go b/src/cmd/services/m3dbnode/main/main_test.go
index d8068c6e29..ccfafbf771 100644
--- a/src/cmd/services/m3dbnode/main/main_test.go
+++ b/src/cmd/services/m3dbnode/main/main_test.go
@@ -103,7 +103,10 @@ func TestConfig(t *testing.T) {
err = xconfig.LoadFile(&cfg, configFd.Name(), xconfig.Options{})
require.NoError(t, err)
- syncCluster, err := cfg.DB.EnvironmentConfig.Services.SyncCluster()
+ envCfg, err := cfg.DB.DiscoveryConfig.EnvironmentConfig(hostID)
+ require.NoError(t, err)
+
+ syncCluster, err := envCfg.Services.SyncCluster()
require.NoError(t, err)
configSvcClient, err := syncCluster.Service.NewClient(instrument.NewOptions().
SetLogger(zap.NewNop()))
@@ -185,7 +188,7 @@ func TestConfig(t *testing.T) {
// NB(r): Make sure client config points to the root config
// service since we're going to instantiate the client configuration
// just by itself.
- cfg.DB.Client.EnvironmentConfig = &cfg.DB.EnvironmentConfig
+ cfg.DB.Client.EnvironmentConfig = &envCfg
cli, err := cfg.DB.Client.NewClient(client.ConfigurationParameters{})
require.NoError(t, err)
@@ -334,7 +337,10 @@ func TestEmbeddedConfig(t *testing.T) {
err = xconfig.LoadFile(&cfg, configFd.Name(), xconfig.Options{})
require.NoError(t, err)
- syncCluster, err := cfg.DB.EnvironmentConfig.Services.SyncCluster()
+ envCfg, err := cfg.DB.DiscoveryConfig.EnvironmentConfig(hostID)
+ require.NoError(t, err)
+
+ syncCluster, err := envCfg.Services.SyncCluster()
require.NoError(t, err)
configSvcClient, err := syncCluster.Service.NewClient(instrument.NewOptions().
SetLogger(zap.NewNop()))
@@ -395,7 +401,7 @@ func TestEmbeddedConfig(t *testing.T) {
// NB(r): Make sure client config points to the root config
// service since we're going to instantiate the client configuration
// just by itself.
- cfg.DB.Client.EnvironmentConfig = &cfg.DB.EnvironmentConfig
+ cfg.DB.Client.EnvironmentConfig = &envCfg
cli, err := cfg.DB.Client.NewClient(client.ConfigurationParameters{})
require.NoError(t, err)
@@ -503,7 +509,6 @@ db:
gcPercentage: 100
writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
commitlog:
@@ -614,40 +619,42 @@ db:
`
kvConfigPortion = `
- config:
- service:
- env: {{.ServiceEnv}}
- zone: {{.ServiceZone}}
- service: {{.ServiceName}}
- cacheDir: {{.ConfigServiceCacheDir}}
- etcdClusters:
- - zone: {{.ServiceZone}}
- endpoints: {{.EtcdEndpoints}}
+ discovery:
+ config:
+ service:
+ env: {{.ServiceEnv}}
+ zone: {{.ServiceZone}}
+ service: {{.ServiceName}}
+ cacheDir: {{.ConfigServiceCacheDir}}
+ etcdClusters:
+ - zone: {{.ServiceZone}}
+ endpoints: {{.EtcdEndpoints}}
`
embeddedKVConfigPortion = `
- config:
- service:
- env: {{.ServiceEnv}}
- zone: {{.ServiceZone}}
- service: {{.ServiceName}}
- cacheDir: {{.ConfigServiceCacheDir}}
- etcdClusters:
- - zone: {{.ServiceZone}}
- endpoints:
- - {{.EtcdEndpoint}}
- seedNodes:
- rootDir: {{.EmbeddedKVDir}}
- listenPeerUrls:
- - {{.LPURL}}
- listenClientUrls:
- - {{.LCURL}}
- initialAdvertisePeerUrls:
- - {{.APURL}}
- advertiseClientUrls:
- - {{.ACURL}}
- initialCluster:
- - hostID: {{.InitialClusterHostID}}
- endpoint: {{.InitialClusterEndpoint}}
+ discovery:
+ config:
+ service:
+ env: {{.ServiceEnv}}
+ zone: {{.ServiceZone}}
+ service: {{.ServiceName}}
+ cacheDir: {{.ConfigServiceCacheDir}}
+ etcdClusters:
+ - zone: {{.ServiceZone}}
+ endpoints:
+ - {{.EtcdEndpoint}}
+ seedNodes:
+ rootDir: {{.EmbeddedKVDir}}
+ listenPeerUrls:
+ - {{.LPURL}}
+ listenClientUrls:
+ - {{.LCURL}}
+ initialAdvertisePeerUrls:
+ - {{.APURL}}
+ advertiseClientUrls:
+ - {{.ACURL}}
+ initialCluster:
+ - hostID: {{.InitialClusterHostID}}
+ endpoint: {{.InitialClusterEndpoint}}
`
)
diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go
index e1da5d5e13..093f490fc7 100644
--- a/src/cmd/services/m3query/config/config.go
+++ b/src/cmd/services/m3query/config/config.go
@@ -57,6 +57,8 @@ const (
// coordinators used only to serve m3admin APIs.
NoopEtcdStorageType BackendStorageType = "noop-etcd"
+ defaultListenAddress = "0.0.0.0:7201"
+
defaultCarbonIngesterListenAddress = "0.0.0.0:7204"
defaultQueryTimeout = 30 * time.Second
@@ -65,6 +67,25 @@ const (
)
var (
+ defaultLogging = xlog.Configuration{
+ Level: "info",
+ }
+ defaultMetricsSanitization = instrument.PrometheusMetricSanitization
+ defaultMetricsExtendedMetricsType = instrument.NoExtendedMetrics
+ defaultMetrics = instrument.MetricsConfiguration{
+ RootScope: &instrument.ScopeConfiguration{
+ Prefix: "coordinator",
+ },
+ PrometheusReporter: &instrument.PrometheusConfiguration{
+ HandlerPath: "/metrics",
+ // Default to coordinator (until https://github.com/m3db/m3/issues/682 is resolved)
+ ListenAddress: "0.0.0.0:7203",
+ },
+ Sanitization: &defaultMetricsSanitization,
+ SamplingRate: 1.0,
+ ExtendedMetrics: &defaultMetricsExtendedMetricsType,
+ }
+
// 5m is the default lookback in Prometheus
defaultLookbackDuration = 5 * time.Minute
@@ -89,10 +110,10 @@ var (
// Configuration is the configuration for the query service.
type Configuration struct {
// Metrics configuration.
- Metrics instrument.MetricsConfiguration `yaml:"metrics"`
+ Metrics *instrument.MetricsConfiguration `yaml:"metrics"`
// Logging configuration.
- Logging xlog.Configuration `yaml:"logging"`
+ Logging *xlog.Configuration `yaml:"logging"`
// Tracing configures opentracing. If not provided, tracing is disabled.
Tracing opentracing.TracingConfiguration `yaml:"tracing"`
@@ -110,7 +131,7 @@ type Configuration struct {
ClusterManagement *ClusterManagementConfiguration `yaml:"clusterManagement"`
// ListenAddress is the server listen address.
- ListenAddress string `yaml:"listenAddress" validate:"nonzero"`
+ ListenAddress *string `yaml:"listenAddress"`
// Filter is the read/write/complete tags filter configuration.
Filter FilterConfiguration `yaml:"filter"`
@@ -167,11 +188,39 @@ type Configuration struct {
Debug config.DebugConfiguration `yaml:"debug"`
}
+// ListenAddressOrDefault returns the listen address or default.
+func (c *Configuration) ListenAddressOrDefault() string {
+ if c.ListenAddress != nil {
+ return *c.ListenAddress
+ }
+
+ return defaultListenAddress
+}
+
+// LoggingOrDefault returns the logging config or default.
+func (c *Configuration) LoggingOrDefault() xlog.Configuration {
+ if c.Logging != nil {
+ return *c.Logging
+ }
+
+ return defaultLogging
+}
+
+// MetricsOrDefault returns the metrics config or default.
+func (c *Configuration) MetricsOrDefault() *instrument.MetricsConfiguration {
+ if c.Metrics != nil {
+ return c.Metrics
+ }
+
+ return &defaultMetrics
+}
+
// WriteWorkerPoolOrDefault returns the write worker pool config or default.
-func (c Configuration) WriteWorkerPoolOrDefault() xconfig.WorkerPoolPolicy {
+func (c *Configuration) WriteWorkerPoolOrDefault() xconfig.WorkerPoolPolicy {
if c.WriteWorkerPool != nil {
return *c.WriteWorkerPool
}
+
return defaultWriteWorkerPool
}
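
The coordinator gets the same optional-with-defaults treatment; the defaults mirror the YAML being deleted from the test fixtures and example configs below, so existing deployments keep identical behavior with smaller config files. One deliberate wrinkle: MetricsOrDefault returns a pointer, so callers share the single package-level defaultMetrics rather than copying it. An illustrative sketch:

    var cfg Configuration // metrics, logging, listenAddress all nil

    _ = cfg.ListenAddressOrDefault() // "0.0.0.0:7201"
    _ = cfg.LoggingOrDefault()       // info-level logging
    m := cfg.MetricsOrDefault()      // shared *instrument.MetricsConfiguration default
    _ = m.SamplingRate               // 1.0
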
diff --git a/src/cmd/services/m3query/config/testdata/config_test.yml b/src/cmd/services/m3query/config/testdata/config_test.yml
index 68250c3f0d..dbe0cbb233 100644
--- a/src/cmd/services/m3query/config/testdata/config_test.yml
+++ b/src/cmd/services/m3query/config/testdata/config_test.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- namespaces:
- namespace: default
@@ -33,23 +18,6 @@ clusters:
initialCluster:
- hostID: m3db_local
endpoint: http://127.0.0.1:2380
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
- writeTimeout: 10s
- fetchTimeout: 15s
- connectTimeout: 20s
- writeRetry:
- initialBackoff: 500ms
- backoffFactor: 3
- maxRetries: 2
- jitter: true
- fetchRetry:
- initialBackoff: 500ms
- backoffFactor: 2
- maxRetries: 3
- jitter: true
- backgroundHealthCheckFailLimit: 4
- backgroundHealthCheckFailThrottleFactor: 0.5
limits:
perQuery:
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml
index 53764669f0..536499f031 100644
--- a/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3coordinator.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- namespaces:
- namespace: aggregated
@@ -33,8 +18,6 @@ clusters:
- zone: embedded
endpoints:
- dbnode01:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
carbon:
ingester:
@@ -56,6 +39,3 @@ carbon:
policies:
- resolution: 5s
retention: 10h
-
-tagOptions:
- idScheme: quoted
diff --git a/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml
index 1f9ab9fed6..28627c78f7 100644
--- a/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml
+++ b/src/cmd/tools/dtest/docker/harness/resources/config/m3dbnode.yml
@@ -52,7 +52,6 @@ db:
gcPercentage: 100
writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
cache:
@@ -71,21 +70,22 @@ db:
filesystem:
filePathPrefix: /var/lib/m3db
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - 127.0.0.1:2379
- seedNodes:
- initialCluster:
- - hostID: m3db_local
- endpoint: http://127.0.0.1:2380
-
+ discovery:
+ config:
+ service:
+ env: default_env
+ zone: embedded
+ service: m3db
+ cacheDir: /var/lib/m3kv
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - 127.0.0.1:2379
+ seedNodes:
+ initialCluster:
+ - hostID: m3db_local
+ endpoint: http://127.0.0.1:2380
+
# un-comment the lines below to enable Jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
# for quick local setup (which this config will send data to).
diff --git a/src/cmd/tools/dtest/harness/harness.go b/src/cmd/tools/dtest/harness/harness.go
index 68f8fbb4f3..2a7953e341 100644
--- a/src/cmd/tools/dtest/harness/harness.go
+++ b/src/cmd/tools/dtest/harness/harness.go
@@ -40,11 +40,10 @@ import (
"github.com/m3db/m3/src/cmd/tools/dtest/config"
"github.com/m3db/m3/src/cmd/tools/dtest/util"
"github.com/m3db/m3/src/cmd/tools/dtest/util/seed"
- xclock "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/integration/generate"
"github.com/m3db/m3/src/dbnode/kvconfig"
- "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/x/m3em/convert"
m3emnode "github.com/m3db/m3/src/dbnode/x/m3em/node"
"github.com/m3db/m3/src/m3em/build"
@@ -52,7 +51,7 @@ import (
hb "github.com/m3db/m3/src/m3em/generated/proto/heartbeat"
"github.com/m3db/m3/src/m3em/node"
xgrpc "github.com/m3db/m3/src/m3em/x/grpc"
- m3xclock "github.com/m3db/m3/src/x/clock"
+ xclock "github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -419,7 +418,7 @@ func (dt *DTestHarness) WaitUntilAllBootstrapped(nodes []node.ServiceNode) error
// available, or the configured bootstrap timeout period; whichever is sooner. It returns
// an error indicating if all the nodes finished bootstrapping.
func (dt *DTestHarness) WaitUntilAllShardsAvailable() error {
- allAvailable := m3xclock.WaitUntil(dt.AllShardsAvailable, dt.BootstrapTimeout())
+ allAvailable := xclock.WaitUntil(dt.AllShardsAvailable, dt.BootstrapTimeout())
if !allAvailable {
return fmt.Errorf("all shards not available")
}
diff --git a/src/dbnode/clock/options.go b/src/cmd/tools/linter/gorules/rules.go
similarity index 74%
rename from src/dbnode/clock/options.go
rename to src/cmd/tools/linter/gorules/rules.go
index fd80364a4e..565cb6f72f 100644
--- a/src/dbnode/clock/options.go
+++ b/src/cmd/tools/linter/gorules/rules.go
@@ -1,4 +1,6 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// +build ignore
+
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,29 +20,10 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package clock
-
-import (
- "time"
-)
-
-type options struct {
- nowFn NowFn
-}
+package gorules
-// NewOptions creates new clock options
-func NewOptions() Options {
- return &options{
- nowFn: time.Now,
- }
-}
-
-func (o *options) SetNowFn(value NowFn) Options {
- opts := *o
- opts.nowFn = value
- return &opts
-}
+import "github.com/quasilyte/go-ruleguard/dsl/fluent"
-func (o *options) NowFn() NowFn {
- return o.nowFn
+func _(m fluent.Matcher) {
+ m.Match(`map[$k]$v`).Where(m["k"].Type.Is("time.Time")).Report(`time.Time used as map key`)
}
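
The single rule above is a go-ruleguard DSL matcher: it matches any map type whose key type is time.Time and reports it, since time.Time values carry wall clock, monotonic reading, and location, making == (and therefore map-key equality) unreliable. A sketch of code the matcher would flag, plus a common fix:

    // Flagged by the rule: "time.Time used as map key".
    seen := map[time.Time]struct{}{}

    // Typical fix: key on a canonical integer form instead.
    seenNanos := map[int64]struct{}{}
    _, _ = seen, seenNanos
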
diff --git a/src/dbnode/clock/types.go b/src/cmd/tools/linter/main/main.go
similarity index 76%
rename from src/dbnode/clock/types.go
rename to src/cmd/tools/linter/main/main.go
index 0b3d5a49f8..1374c5ac58 100644
--- a/src/dbnode/clock/types.go
+++ b/src/cmd/tools/linter/main/main.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,20 +18,12 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package clock
+package main
import (
- "time"
+ "golang.org/x/tools/go/analysis/multichecker"
)
-// NowFn is the function supplied to determine "now"
-type NowFn func() time.Time
-
-// Options represents the options for the clock
-type Options interface {
- // SetNowFn sets the nowFn
- SetNowFn(value NowFn) Options
-
- // NowFn returns the nowFn
- NowFn() NowFn
+func main() {
+ multichecker.Main()
}
diff --git a/src/dbnode/client/client_mock.go b/src/dbnode/client/client_mock.go
index f56af6000f..1c623c1e23 100644
--- a/src/dbnode/client/client_mock.go
+++ b/src/dbnode/client/client_mock.go
@@ -28,7 +28,6 @@ import (
"reflect"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
@@ -37,6 +36,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/client/connection_pool.go b/src/dbnode/client/connection_pool.go
index e8db551890..4d1e1af1d2 100644
--- a/src/dbnode/client/connection_pool.go
+++ b/src/dbnode/client/connection_pool.go
@@ -31,7 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/topology"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
murmur3 "github.com/m3db/stackmurmur3/v2"
"github.com/uber-go/tally"
@@ -68,14 +68,14 @@ type connPool struct {
}
type conn struct {
- channel xclose.SimpleCloser
+ channel xresource.SimpleCloser
client rpc.TChanNode
}
// NewConnectionFn is a function that creates a connection.
type NewConnectionFn func(
channelName string, addr string, opts Options,
-) (xclose.SimpleCloser, rpc.TChanNode, error)
+) (xresource.SimpleCloser, rpc.TChanNode, error)
type healthCheckFn func(client rpc.TChanNode, opts Options) error
diff --git a/src/dbnode/client/connection_pool_test.go b/src/dbnode/client/connection_pool_test.go
index 0724171f6f..f4d391180c 100644
--- a/src/dbnode/client/connection_pool_test.go
+++ b/src/dbnode/client/connection_pool_test.go
@@ -30,7 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/topology"
xclock "github.com/m3db/m3/src/x/clock"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/stretchr/testify/require"
"github.com/golang/mock/gomock"
@@ -85,7 +85,7 @@ func TestConnectionPoolConnectsAndRetriesConnects(t *testing.T) {
fn := func(
ch string, addr string, opts Options,
- ) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ ) (xresource.SimpleCloser, rpc.TChanNode, error) {
attempt := int(atomic.AddInt32(&attempts, 1))
if attempt == 1 {
return nil, nil, fmt.Errorf("a connect error")
@@ -237,7 +237,7 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
fn := func(
ch string, addr string, opts Options,
- ) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ ) (xresource.SimpleCloser, rpc.TChanNode, error) {
attempt := atomic.AddInt32(&newConnAttempt, 1)
if attempt == 1 {
return channelNone, client1, nil
diff --git a/src/dbnode/client/host_queue.go b/src/dbnode/client/host_queue.go
index d9095b38db..32a42dbb94 100644
--- a/src/dbnode/client/host_queue.go
+++ b/src/dbnode/client/host_queue.go
@@ -27,9 +27,9 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
xsync "github.com/m3db/m3/src/x/sync"
diff --git a/src/dbnode/client/options.go b/src/dbnode/client/options.go
index a54ac12d3d..c8e350b199 100644
--- a/src/dbnode/client/options.go
+++ b/src/dbnode/client/options.go
@@ -27,7 +27,6 @@ import (
"runtime"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/encoding/proto"
@@ -38,11 +37,12 @@ import (
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
+ xresource "github.com/m3db/m3/src/x/resource"
xretry "github.com/m3db/m3/src/x/retry"
"github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
@@ -319,7 +319,7 @@ func NewOptionsForAsyncClusters(opts Options, topoInits []topology.Initializer,
func defaultNewConnectionFn(
channelName string, address string, opts Options,
-) (xclose.SimpleCloser, rpc.TChanNode, error) {
+) (xresource.SimpleCloser, rpc.TChanNode, error) {
channel, err := tchannel.NewChannel(channelName, opts.ChannelOptions())
if err != nil {
return nil, nil, err
diff --git a/src/dbnode/client/session.go b/src/dbnode/client/session.go
index c6cd514ea5..a188f105cf 100644
--- a/src/dbnode/client/session.go
+++ b/src/dbnode/client/session.go
@@ -32,7 +32,6 @@ import (
"time"
"github.com/m3db/m3/src/cluster/shard"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
@@ -48,12 +47,13 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/checked"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
+ xresource "github.com/m3db/m3/src/x/resource"
xretry "github.com/m3db/m3/src/x/retry"
"github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
@@ -136,7 +136,7 @@ type sessionState struct {
type session struct {
state sessionState
opts Options
- runtimeOptsListenerCloser xclose.Closer
+ runtimeOptsListenerCloser xresource.SimpleCloser
scope tally.Scope
nowFn clock.NowFn
log *zap.Logger
diff --git a/src/dbnode/client/session_fetch_high_concurrency_test.go b/src/dbnode/client/session_fetch_high_concurrency_test.go
index 77a07ea747..a4acd087c3 100644
--- a/src/dbnode/client/session_fetch_high_concurrency_test.go
+++ b/src/dbnode/client/session_fetch_high_concurrency_test.go
@@ -34,8 +34,8 @@ import (
"github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/ident"
+ xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -102,7 +102,7 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
// to be able to mock the entire end to end pipeline
newConnFn := func(
_ string, addr string, _ Options,
- ) (xclose.SimpleCloser, rpc.TChanNode, error) {
+ ) (xresource.SimpleCloser, rpc.TChanNode, error) {
mockClient := rpc.NewMockTChanNode(ctrl)
mockClient.EXPECT().Health(gomock.Any()).
Return(healthCheckResult, nil).
diff --git a/src/dbnode/client/types.go b/src/dbnode/client/types.go
index 9d5b47926e..57b8ca71a6 100644
--- a/src/dbnode/client/types.go
+++ b/src/dbnode/client/types.go
@@ -23,7 +23,6 @@ package client
import (
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
@@ -32,6 +31,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/topology"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/config/m3dbnode-all-config.yml b/src/dbnode/config/m3dbnode-all-config.yml
index 0849aa4cd9..da6caf8692 100644
--- a/src/dbnode/config/m3dbnode-all-config.yml
+++ b/src/dbnode/config/m3dbnode-all-config.yml
@@ -99,8 +99,6 @@ db:
# Whether new series should be created asynchronously (recommended value
# of true for high throughput.)
writeNewSeriesAsync: true
- # Maximum number of new series that can be created per second.
- writeNewSeriesLimitPerSecond: 1048576
writeNewSeriesBackoffDuration: 2ms
bootstrap:
@@ -146,23 +144,24 @@ db:
checkInterval: 1m
# etcd configuration.
- config:
- service:
- # KV environment, zone, and service from which to write/read KV data (placement
- # and configuration). Leave these as the default values unless you know what
- # you're doing.
- env: default_env
- zone: embedded
- service: m3db
- # Directory to store cached etcd data in.
- cacheDir: /var/lib/m3kv
- # Configuration to identify the etcd hosts this node should connect to.
- etcdClusters:
- - zone: embedded
- endpoints:
- - 127.0.0.1:2379
- # Should only be present if running an M3DB cluster with embedded etcd.
- seedNodes:
- initialCluster:
- - hostID: m3db_local
- endpoint: http://127.0.0.1:2380
+ discovery:
+ config:
+ service:
+ # KV environment, zone, and service from which to write/read KV data (placement
+ # and configuration). Leave these as the default values unless you know what
+ # you're doing.
+ env: default_env
+ zone: embedded
+ service: m3db
+ # Directory to store cached etcd data in.
+ cacheDir: /var/lib/m3kv
+ # Configuration to identify the etcd hosts this node should connect to.
+ etcdClusters:
+ - zone: embedded
+ endpoints:
+ - 127.0.0.1:2379
+ # Should only be present if running an M3DB cluster with embedded etcd.
+ seedNodes:
+ initialCluster:
+ - hostID: m3db_local
+ endpoint: http://127.0.0.1:2380
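
For operators, the migration in this and the following config files is mechanical: the former top-level db.config block nests unchanged under db.discovery.config. Sketch of before and after:

    # Before
    db:
      config:
        service:
          env: default_env

    # After
    db:
      discovery:
        config:
          service:
            env: default_env
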
diff --git a/src/dbnode/config/m3dbnode-cluster-template.yml b/src/dbnode/config/m3dbnode-cluster-template.yml
index d5cddf3c5a..4d6689a84c 100644
--- a/src/dbnode/config/m3dbnode-cluster-template.yml
+++ b/src/dbnode/config/m3dbnode-cluster-template.yml
@@ -1,40 +1,11 @@
coordinator:
- listenAddress: 0.0.0.0:7201
-
local:
namespaces:
- namespace: default
type: unaggregated
retention: 48h
- logging:
- level: info
-
- metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
- tagOptions:
- # Configuration setting for generating metric IDs from tags.
- idScheme: quoted
-
db:
- logging:
- level: info
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
hostID:
resolver: hostname
@@ -59,35 +30,3 @@ db:
# endpoint: http://HOST2_STATIC_IP_ADDRESS:2380
# - hostID: host3
# endpoint: http://HOST3_STATIC_IP_ADDRESS:2380
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
diff --git a/src/dbnode/config/m3dbnode-local-etcd-proto.yml b/src/dbnode/config/m3dbnode-local-etcd-proto.yml
index 0d34b5f53a..09836779be 100644
--- a/src/dbnode/config/m3dbnode-local-etcd-proto.yml
+++ b/src/dbnode/config/m3dbnode-local-etcd-proto.yml
@@ -1,101 +1,17 @@
-coordinator:
- listenAddress: 0.0.0.0:7201
-
- local:
- namespaces:
- - namespace: default
- type: unaggregated
- retention: 48h
-
- logging:
- level: info
-
- metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
- tagOptions:
- # Configuration setting for generating metric IDs from tags.
- idScheme: quoted
+coordinator: {}
db:
- logging:
- level: info
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: config
value: m3db_local
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - 127.0.0.1:2379
- seedNodes:
- initialCluster:
- - hostID: m3db_local
- endpoint: http://127.0.0.1:2380
+ discovery:
+ type: m3db_single_node
proto:
- enabled: true
- schema_registry:
- # Need an entry for each configured namespace.
- "default":
- schemaFilePath: "/etc/m3dbnode/default_schema.proto"
- messageName: "VehicleLocation"
-
- # un-comment the lines below to enable Jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
- # for quick local setup (which this config will send data to).
-
- # tracing:
- # backend: jaeger
+ enabled: true
+ schema_registry:
+ # Need an entry for each configured namespace.
+ "default":
+ schemaFilePath: "/etc/m3dbnode/default_schema.proto"
+ messageName: "VehicleLocation"
diff --git a/src/dbnode/config/m3dbnode-local-etcd.yml b/src/dbnode/config/m3dbnode-local-etcd.yml
index 1f9ab9fed6..5ebf51b66e 100644
--- a/src/dbnode/config/m3dbnode-local-etcd.yml
+++ b/src/dbnode/config/m3dbnode-local-etcd.yml
@@ -1,90 +1,12 @@
-coordinator:
- listenAddress: 0.0.0.0:7201
-
- local:
- namespaces:
- - namespace: default
- type: unaggregated
- retention: 48h
-
- logging:
- level: info
-
- metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
- tagOptions:
- # Configuration setting for generating metric IDs from tags.
- idScheme: quoted
+coordinator: {}
db:
- logging:
- level: info
-
- metrics:
- prometheus:
- handlerPath: /metrics
- sanitization: prometheus
- samplingRate: 1.0
- extended: detailed
-
- listenAddress: 0.0.0.0:9000
- clusterListenAddress: 0.0.0.0:9001
- httpNodeListenAddress: 0.0.0.0:9002
- httpClusterListenAddress: 0.0.0.0:9003
- debugListenAddress: 0.0.0.0:9004
-
hostID:
resolver: config
value: m3db_local
- client:
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
- gcPercentage: 100
-
- writeNewSeriesAsync: true
- writeNewSeriesLimitPerSecond: 1048576
- writeNewSeriesBackoffDuration: 2ms
-
- cache:
- series:
- policy: lru
- postingsList:
- size: 262144
-
- commitlog:
- flushMaxBytes: 524288
- flushEvery: 1s
- queue:
- calculationType: fixed
- size: 2097152
-
- filesystem:
- filePathPrefix: /var/lib/m3db
-
- config:
- service:
- env: default_env
- zone: embedded
- service: m3db
- cacheDir: /var/lib/m3kv
- etcdClusters:
- - zone: embedded
- endpoints:
- - 127.0.0.1:2379
- seedNodes:
- initialCluster:
- - hostID: m3db_local
- endpoint: http://127.0.0.1:2380
+ discovery:
+ type: m3db_single_node
# un-comment the lines below to enable Jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
# for quick local setup (which this config will send data to).
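
The two-line stanza above is the headline win of the new discovery package: `type: m3db_single_node` replaces roughly eighty lines of boilerplate. Per m3dbSingleNodeEnvConfig, defined later in this diff, it expands to the equivalent of:

    discovery:
      config:
        service:
          env: default_env
          zone: embedded
          service: m3db
          cacheDir: /var/lib/m3kv
          etcdClusters:
            - zone: embedded
              endpoints:
                - 127.0.0.1:2379
        seedNodes:
          initialCluster:
            - hostID: <resolved host ID>   # taken from db.hostID at runtime
              endpoint: http://127.0.0.1:2380
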
diff --git a/src/dbnode/digest/fd_digest.go b/src/dbnode/digest/fd_digest.go
index af316ff3e3..c8db06a14c 100644
--- a/src/dbnode/digest/fd_digest.go
+++ b/src/dbnode/digest/fd_digest.go
@@ -25,12 +25,12 @@ import (
"hash/adler32"
"os"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// FdWithDigest is a container for a file descriptor and the digest for the file contents.
type FdWithDigest interface {
- xclose.Closer
+ xresource.Closer
// Fd returns the file descriptor.
Fd() *os.File
diff --git a/src/dbnode/discovery/config.go b/src/dbnode/discovery/config.go
new file mode 100644
index 0000000000..8cb86318a1
--- /dev/null
+++ b/src/dbnode/discovery/config.go
@@ -0,0 +1,235 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package discovery provides discovery configuration.
+package discovery
+
+import (
+ "fmt"
+
+ etcdclient "github.com/m3db/m3/src/cluster/client/etcd"
+ "github.com/m3db/m3/src/dbnode/environment"
+)
+
+const (
+ defaultEnvironment = "default_env"
+ defaultZone = "embedded"
+ defaultM3DBService = "m3db"
+ defaultM3AggregatorService = "m3aggregator"
+ defaultCacheDirectory = "/var/lib/m3kv"
+ defaultSingleNodeClusterEndpoint = "127.0.0.1:2379"
+ defaultSingleNodeClusterSeedEndpoint = "http://127.0.0.1:2380"
+)
+
+var validDiscoveryConfigTypes = []ConfigurationType{
+ ConfigType,
+ M3DBSingleNodeType,
+ M3DBClusterType,
+ M3AggregatorClusterType,
+}
+
+// ConfigurationType defines the type of discovery configuration.
+type ConfigurationType uint
+
+const (
+ // ConfigType defines a generic definition for service discovery via etcd.
+ ConfigType ConfigurationType = iota
+ // M3DBSingleNodeType defines configuration for a single M3DB node via etcd.
+ M3DBSingleNodeType
+ // M3DBClusterType defines M3DB discovery via etcd.
+ M3DBClusterType
+ // M3AggregatorClusterType defines M3Aggregator discovery via etcd.
+ M3AggregatorClusterType
+)
+
+// UnmarshalYAML unmarshals a ConfigurationType into a valid type from string.
+func (t *ConfigurationType) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var str string
+ if err := unmarshal(&str); err != nil {
+ return err
+ }
+
+ // If unspecified, use default mode.
+ if str == "" {
+ *t = ConfigType
+
+ return nil
+ }
+
+ for _, valid := range validDiscoveryConfigTypes {
+ if str == valid.String() {
+ *t = valid
+
+ return nil
+ }
+ }
+
+ return fmt.Errorf("invalid ConfigurationType '%s' valid types are: %s",
+ str, validDiscoveryConfigTypes)
+}
+
+// String returns the discovery configuration type as a string.
+func (t ConfigurationType) String() string {
+ switch t {
+ case ConfigType:
+ return "config"
+ case M3DBSingleNodeType:
+ return "m3db_single_node"
+ case M3DBClusterType:
+ return "m3db_cluster"
+ case M3AggregatorClusterType:
+ return "m3aggregator_cluster"
+ }
+ return "unknown"
+}
+
+// Configuration defines how services are to be discovered.
+type Configuration struct {
+ // Type defines the type of discovery configuration being used.
+ Type *ConfigurationType `yaml:"type"`
+
+ // M3DBCluster defines M3DB discovery via etcd.
+ M3DBCluster *M3DBClusterDiscoveryConfiguration `yaml:"m3dbCluster"`
+
+ // M3AggregatorCluster defines M3Aggregator discovery via etcd.
+ M3AggregatorCluster *M3AggregatorClusterDiscoveryConfiguration `yaml:"m3AggregatorCluster"`
+
+ // Config defines a generic definition for service discovery via etcd.
+ Config *environment.Configuration `yaml:"config"`
+}
+
+// M3DBClusterDiscoveryConfiguration defines discovery configuration for M3DB.
+type M3DBClusterDiscoveryConfiguration struct {
+ Env string `yaml:"env" validate:"nonzero"`
+ Zone *string `yaml:"zone"`
+ Endpoints []string `yaml:"endpoints"`
+}
+
+// M3AggregatorClusterDiscoveryConfiguration defines discovery configuration for M3Aggregator.
+type M3AggregatorClusterDiscoveryConfiguration struct {
+ Env string `yaml:"env"`
+ Zone *string `yaml:"zone"`
+ Endpoints []string `yaml:"endpoints"`
+}
+
+// EnvironmentConfig provides the environment configuration
+// based on the type of discovery configuration set.
+func (c *Configuration) EnvironmentConfig(
+ hostID string,
+) (environment.Configuration, error) {
+ discoveryConfigType := ConfigType
+ if c.Type != nil {
+ discoveryConfigType = *c.Type
+ }
+
+ switch discoveryConfigType {
+ case ConfigType:
+ return *c.Config, nil
+ case M3DBSingleNodeType:
+ return c.m3dbSingleNodeEnvConfig(hostID), nil
+ case M3DBClusterType:
+ return c.envConfig(
+ discoveryConfigType,
+ defaultM3DBService,
+ c.M3DBCluster.Zone,
+ c.M3DBCluster.Env,
+ c.M3DBCluster.Endpoints,
+ )
+ case M3AggregatorClusterType:
+ return c.envConfig(
+ discoveryConfigType,
+ defaultM3AggregatorService,
+ c.M3AggregatorCluster.Zone,
+ c.M3AggregatorCluster.Env,
+ c.M3AggregatorCluster.Endpoints,
+ )
+ }
+
+ return environment.Configuration{}, fmt.Errorf("unrecognized discovery type: %d", discoveryConfigType)
+}
+
+func (c *Configuration) m3dbSingleNodeEnvConfig(
+ hostID string,
+) environment.Configuration {
+ return environment.Configuration{
+ Services: []*environment.DynamicCluster{
+ {
+ Service: &etcdclient.Configuration{
+ Service: defaultM3DBService,
+ CacheDir: defaultCacheDirectory,
+ Zone: defaultZone,
+ Env: defaultEnvironment,
+ ETCDClusters: []etcdclient.ClusterConfig{
+ {
+ Zone: defaultZone,
+ Endpoints: []string{defaultSingleNodeClusterEndpoint},
+ },
+ },
+ },
+ },
+ },
+ SeedNodes: &environment.SeedNodesConfig{
+ InitialCluster: []environment.SeedNode{
+ {
+ HostID: hostID,
+ Endpoint: defaultSingleNodeClusterSeedEndpoint,
+ },
+ },
+ },
+ }
+}
+
+func (c *Configuration) envConfig(
+ configType ConfigurationType,
+ service string,
+ zone *string,
+ env string,
+ endpoints []string,
+) (environment.Configuration, error) {
+ if c == nil {
+ err := fmt.Errorf("discovery configuration required for type: %s",
+ configType.String())
+ return environment.Configuration{}, err
+ }
+
+ validZone := defaultZone
+ if zone != nil {
+ validZone = *zone
+ }
+
+ return environment.Configuration{
+ Services: []*environment.DynamicCluster{
+ {
+ Service: &etcdclient.Configuration{
+ Service: service,
+ CacheDir: defaultCacheDirectory,
+ Zone: validZone,
+ Env: env,
+ ETCDClusters: []etcdclient.ClusterConfig{
+ {
+ Zone: validZone,
+ Endpoints: endpoints,
+ },
+ },
+ },
+ },
+ },
+ }, nil
+}
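
A hedged usage sketch of the new package from a caller's point of view; note the hostID argument only matters for the single-node type, where it seeds the embedded etcd initial cluster:

    singleNode := discovery.M3DBSingleNodeType
    cfg := discovery.Configuration{Type: &singleNode}

    envCfg, err := cfg.EnvironmentConfig("m3db_local")
    if err != nil {
        // handle error
    }

    // Default etcd service m3db/default_env/embedded at 127.0.0.1:2379,
    // plus one seed node for host m3db_local at http://127.0.0.1:2380.
    _ = envCfg.Services[0].Service.Service        // "m3db"
    _ = envCfg.SeedNodes.InitialCluster[0].HostID // "m3db_local"
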
diff --git a/src/dbnode/discovery/config_test.go b/src/dbnode/discovery/config_test.go
new file mode 100644
index 0000000000..307769688c
--- /dev/null
+++ b/src/dbnode/discovery/config_test.go
@@ -0,0 +1,166 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package discovery
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/m3db/m3/src/dbnode/environment"
+ "github.com/m3db/m3/src/x/config"
+)
+
+func TestM3DBSingleNodeType(t *testing.T) {
+ in := `
+type: m3db_single_node
+`
+
+ hostID := "test_id"
+ envConfig := getEnvConfig(t, in, hostID)
+
+ assert.Equal(t, 1, len(envConfig.Services))
+ assert.Equal(t, 1, len(envConfig.SeedNodes.InitialCluster))
+
+ s := envConfig.Services[0].Service
+ assert.Equal(t, defaultM3DBService, s.Service)
+ assert.Equal(t, defaultEnvironment, s.Env)
+ assert.Equal(t, defaultZone, s.Zone)
+ assert.Equal(t, defaultCacheDirectory, s.CacheDir)
+ assert.Equal(t, 1, len(s.ETCDClusters))
+ assert.Equal(t, defaultZone, s.ETCDClusters[0].Zone)
+ assert.Equal(t, 1, len(s.ETCDClusters[0].Endpoints))
+ assert.Equal(t, defaultSingleNodeClusterEndpoint, s.ETCDClusters[0].Endpoints[0])
+
+ c := envConfig.SeedNodes.InitialCluster[0]
+ assert.Equal(t, defaultSingleNodeClusterSeedEndpoint, c.Endpoint)
+ assert.Equal(t, hostID, c.HostID)
+}
+
+func TestM3DBClusterType(t *testing.T) {
+ in := `
+type: m3db_cluster
+m3dbCluster:
+ env: a
+ zone: b
+ endpoints:
+ - end_1
+ - end_2
+`
+
+ envConfig := getEnvConfig(t, in, "")
+ validateClusterConfig(t, envConfig, defaultM3DBService)
+}
+
+func TestM3AggregatorClusterType(t *testing.T) {
+ in := `
+type: m3aggregator_cluster
+m3AggregatorCluster:
+ env: a
+ zone: b
+ endpoints:
+ - end_1
+ - end_2
+`
+
+ envConfig := getEnvConfig(t, in, "")
+ validateClusterConfig(t, envConfig, defaultM3AggregatorService)
+}
+
+func TestConfigType(t *testing.T) {
+ in := `
+config:
+ service:
+ env: test_env
+ zone: test_zone
+ service: test_service
+ cacheDir: test/cache
+ etcdClusters:
+ - zone: test_zone_2
+ endpoints:
+ - 127.0.0.1:2379
+ seedNodes:
+ initialCluster:
+ - hostID: host_id
+ endpoint: http://127.0.0.1:2380
+`
+
+ hostID := "test_id"
+ envConfig := getEnvConfig(t, in, hostID)
+
+ assert.Equal(t, 1, len(envConfig.Services))
+ assert.Equal(t, 1, len(envConfig.SeedNodes.InitialCluster))
+
+ s := envConfig.Services[0].Service
+ assert.Equal(t, "test_service", s.Service)
+ assert.Equal(t, "test_env", s.Env)
+ assert.Equal(t, "test_zone", s.Zone)
+ assert.Equal(t, "test/cache", s.CacheDir)
+ assert.Equal(t, 1, len(s.ETCDClusters))
+ assert.Equal(t, "test_zone_2", s.ETCDClusters[0].Zone)
+ assert.Equal(t, 1, len(s.ETCDClusters[0].Endpoints))
+ assert.Equal(t, "127.0.0.1:2379", s.ETCDClusters[0].Endpoints[0])
+
+ c := envConfig.SeedNodes.InitialCluster[0]
+ assert.Equal(t, "http://127.0.0.1:2380", c.Endpoint)
+ assert.Equal(t, "host_id", c.HostID)
+}
+
+func getEnvConfig(t *testing.T, in string, hostID string) environment.Configuration {
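+ // Write the YAML input to a temp file and load it via config.LoadFile.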
+ fd, err := ioutil.TempFile("", "config.yaml")
+ assert.NoError(t, err)
+ defer func() {
+ assert.NoError(t, fd.Close())
+ assert.NoError(t, os.Remove(fd.Name()))
+ }()
+
+ _, err = fd.Write([]byte(in))
+ assert.NoError(t, err)
+
+ var cfg Configuration
+ err = config.LoadFile(&cfg, fd.Name(), config.Options{})
+ assert.NoError(t, err)
+
+ envConfig, err := cfg.EnvironmentConfig(hostID)
+ assert.NoError(t, err)
+
+ return envConfig
+}
+
+func validateClusterConfig(t *testing.T,
+ envConfig environment.Configuration,
+ expectedService string,
+) {
+ assert.Equal(t, 1, len(envConfig.Services))
+ assert.Nil(t, envConfig.SeedNodes)
+ s := envConfig.Services[0].Service
+ assert.Equal(t, expectedService, s.Service)
+ assert.Equal(t, "a", s.Env)
+ assert.Equal(t, "b", s.Zone)
+ assert.Equal(t, defaultCacheDirectory, s.CacheDir)
+ assert.Equal(t, 1, len(s.ETCDClusters))
+ assert.Equal(t, "b", s.ETCDClusters[0].Zone)
+ assert.Equal(t, 2, len(s.ETCDClusters[0].Endpoints))
+ assert.Equal(t, "end_1", s.ETCDClusters[0].Endpoints[0])
+ assert.Equal(t, "end_2", s.ETCDClusters[0].Endpoints[1])
+}
diff --git a/src/dbnode/encoding/encoding.go b/src/dbnode/encoding/encoding.go
index 4d65f5e921..f6623124ec 100644
--- a/src/dbnode/encoding/encoding.go
+++ b/src/dbnode/encoding/encoding.go
@@ -22,25 +22,16 @@ package encoding
import "math/bits"
-// Bit is just a byte
+// Bit is just a byte.
type Bit byte
-// NumSig returns the number of significant values in a uint64
+// NumSig returns the number of significant bits in a uint64.
func NumSig(v uint64) uint8 {
- if v == 0 {
- return 0
- }
-
- numLeading := uint8(0)
- for tmp := v; (tmp & (1 << 63)) == 0; tmp <<= 1 {
- numLeading++
- }
-
- return uint8(64) - numLeading
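+ // bits.LeadingZeros64 returns 64 for v == 0, so the zero case needs no special handling.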
+ return uint8(64 - bits.LeadingZeros64(v))
}
// LeadingAndTrailingZeros calculates the number of leading and trailing 0s
-// for a uint64
+// for a uint64.
func LeadingAndTrailingZeros(v uint64) (int, int) {
if v == 0 {
return 64, 0
@@ -51,7 +42,7 @@ func LeadingAndTrailingZeros(v uint64) (int, int) {
return numLeading, numTrailing
}
-// SignExtend sign extends the highest bit of v which has numBits (<=64)
+// SignExtend sign extends the highest bit of v which has numBits (<=64).
func SignExtend(v uint64, numBits uint) int64 {
shift := 64 - numBits
return (int64(v) << shift) >> shift
diff --git a/src/dbnode/encoding/encoding_test.go b/src/dbnode/encoding/encoding_test.go
index c48ad981b2..a81a86ae08 100644
--- a/src/dbnode/encoding/encoding_test.go
+++ b/src/dbnode/encoding/encoding_test.go
@@ -21,11 +21,17 @@
package encoding
import (
+ "math"
"testing"
"github.com/stretchr/testify/require"
)
func TestNumSig(t *testing.T) {
+ require.Equal(t, uint8(0), NumSig(uint64(0)))
+ require.Equal(t, uint8(1), NumSig(uint64(1)))
require.Equal(t, uint8(4), NumSig(uint64(12)))
+ require.Equal(t, uint8(63), NumSig(uint64(math.MaxUint64>>1)))
+ require.Equal(t, uint8(64), NumSig(uint64(math.MaxUint64)))
+ require.Equal(t, uint8(64), NumSig(uint64(math.MaxUint64-1)))
}
diff --git a/src/dbnode/encoding/m3tsz/encoder_benchmark_test.go b/src/dbnode/encoding/m3tsz/encoder_benchmark_test.go
new file mode 100644
index 0000000000..67c62b4660
--- /dev/null
+++ b/src/dbnode/encoding/m3tsz/encoder_benchmark_test.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package m3tsz
+
+import (
+ "bytes"
+ "encoding/base64"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/m3db/m3/src/dbnode/encoding"
+ "github.com/m3db/m3/src/dbnode/ts"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+var sampleSeriesBase64 = []string{
+ "FiYqRnIdAACAQEAAAAArkizADVrDlnvgAATiGAAEbAABZgABkwAA6DFXAxnaGOwDF2ON7Yw85trFGksvYiyRjTFW3MeYs21wLHm9t/YkxtjbHW5vCYi6JwTF2LMcYsGI2DGdTRBjsCxRi7bHdsRZI2ZjDdGQsfbs15ijGHosPYqxNjjPGnMcYu29jbJmusVY03FibeGkMYY8xVizVHHsXY+3BjTR2NMYcE2ti7V2yMZb63hi7dmdMYdoxpizgGxMWa805ljgGMsVY4zRiLiHWslZo11lLOGLMdY61Zkjd2uMRZi1BljI2ostbo1hmDfHasVZUytjTeWOshZK3BjTdGtsWYwxdjwYjgMZpNwzLKM8+btsqGOwjHGMNubIxtnTVWVt1bUxRtLWmWtnY+x1nLU2YtjcuzJw7VWbMfYu0RjLVWbM6aY4lpjT2LtVaS0NqTGGJNeYq3torFWMNJaS1ZrTRWpuCYw1xjLFmItCaExJkDWGZMWZg6xjLMGLtiZmxps7EWLNlYw6NjzFmLtvZaxhi7GGNBiPAxmK8DRM0yj8uq2TKMk0DZOu+rPMsyjQumGOxTgGMNzaaxVrLEWLMUZk0xoDy2QN3Y8yNvLNGmM0boxRtrxGNMcY20dy7G2fM2bqyBjrXmHNyY4xlvzGWJsXcIxdt7H2LtIY2xRq7gGJsbZoxRiTVWVtvaey92LdGKMeYsxoMR+GM9WgZcMdsWKNrcIxNibl2KMaY0x5mTOWOvecYxRuDbGLsubWxJpjaWKsebExZv7JGKsucAxVu7HGOMfbkxdtjdGLMZY8xBkjH2Kt1d2xVtzIGLuCYyyBjTJ2KstbWxVtDbmMMzY6xF4bPWJtxdgxJvrJWMsdaGxhuzTWJs1egxRt7ZmItNYuxRpzFmOtvdyw9kTZ2LtzdaxZiTV2LsabYxJmTXWJtzZCx5pTH2Lt4cQxdtTiWNNea4xNn7imLtccaxVjTZmLMYYuxZnDSmNM0euxVmjU2KtwcWxRjrj2JsbdsxhjjHWNhiOAxW9rhjOwMdl2LN3aczRjbsmOOCbkxhkDa2LN3Zo1xtjGGMtxbexNmLJWJsZbQ19jDU2LNydwxZnLIGONwbI1xuTNGLNqYwxNnbVmQMdcg15uDF2NtKbaxdq7SWKtqa015jbbmNMib2x9mrHmMtxZA1htrWmLNzZGxNoLQmONzbA1drbGmJt0ZCxRjLIWJt0Y41lsDNWJtiaqxFjzF2OuEbk1ltjRGKNYZUxRtjI2MN/eI11vbe2Jsob4xljrJmKttaM19j7HGKuEaOxJkLdmJOIcW1hmLbWNMvY6xZmTHmMs9b82Fk7TmKM7cKxtijW2LMuYy2BpLQ2NNacOxpjbg2OODaSxp4LVmJtfbux1vcAA", // nolint:lll
+ "FiYqRnIdAACAQEAAAAArkizADAfgAATiCSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSTAAA=", // nolint:lll
+ "FiYqRnIdAACAQEAAAABDnTBYE/Bho3fYmHbfAAAnEMAB+lf3Y8nesGNjT6sqdklzsPFHkkSBobeKPJIkDQy3ijySJA0MlgE3WlChWbTdaUKFZkudjtB7gub4oltoPcFzfFEkkksFUt2Tfa6Fqpbsm+10lzsPqTuES/mJJJLpW9i+c6zi+SW7F851nF9uxfOdZxfLdi+c6zi+SSXOw8DYDYkTFt4GwGxImLLeBsBsSJi28DYDYkTFkulb2L5zrOL5JdC/dMVuc3q9t0xW5zer23TFbnN6vLbpitzm9XtumK3Ob1eW3TFbnN6vJLbpitzm9Xufh7R1X1eVLLJJaw/0a8y0ktYf6NebSS1h/o15lpJaw/0a82klrD/RrzLSS1h/o15lz8PaOq+rypZJYpkiO1dsA1MkR2rtkunPPlZJttpJc/D/fBmnRHWWSS1h/o15lpJaw/0a82klrD/RrzLSS1h/o15kloYescF8rGhh6xwXytz8P1pjNBhIbfrTGaDCQ/AAAnZn///2ERxWSw4LulSvs8twXdKlfZ7cF3SpX2e6F98YLEnhMbXxgsSeExtfGCxJ4TGWvjBYk8JjJJLn3cL98PJ8jbhfvh5PkZLoX3sr7uILjlr2V93EFxySS593C/fDyfI24X74eT5G6F97K+7iC47Xsr7uILjteyvu4guOWvZX3cQXHJJJa9lfdxBcdr2V93EFx3Pw9tAaypmht7aA1lTNDLe2gNZUzQyS3toDWVM0MktDJI57e/ac7HxxmbkR/pJYIWOVrdpMJJJaFjla3aTLQscrW7SbQscrW7SZaFjla3aTJJJaFjla3aTdC+AoWxZUHMtRGb6NHgwWojN9GjwZJaiM30aPBkudh5g7Stcc3JJbzB2la45u3mDtK1xzckksJxZZl2POLTiyzLsedpxZZl2POWnFlmXY85JJc7DzB2la45u3mDtK1xzdhOLLMux5xdK3UGgGFJIS2oNAMKSQkujZOLLMux5yXOw8wdpWuObt5g7Stcc3LeYO0rXHN28wdpWuObksEiGQkVkWJJLo3X2kSbTyRdCywmW9XXelz8OPQTV9E75bj0E1fRO+Szv8H06YXklzsf957cWnANv957cWnANv957cWnAMklv957cWnAMlhn8hhg0Dol0L4gCZVqxQ3Pw49BNX0Tvtx6CavonfJZEATKtWKGSWiAJlWrFDLRAEyrVihtEATKtWKG59+bUdK+4kLZtR0r7iQkls2o6V9xIS2bUdK+4kLZtR0r7iQls2o6V9xIWzajpX3EhLn4dmc/ehX1W7M5+9Cvqkkt2Zz96FfVbszn70K+qW7M5+9Cvqt2Zz96FfVYLZCxIudM1shYkXOmS5+HZnP3oV9Ukt2Zz96FfVLdmc/ehX1W7M5+9CvqtMWFPI9/rJJJYsdtxTlpY1jtuKctLLRg2ocIN1owbUOEG66F3R+2WXEH26P2yy4g+SSS3R+2WXEHy3R+2WXEH26P2yy4g+3R+2WXEH2Rg2ocIN0SSWPcAhr1yze4BDXrlkujZGDahwg3WjBtQ4QbpdCnzVjlG88kkklz7KKnPV58+602dsW5NrbO2Lcm1kklz7KKnPV58+1FTnq8+fJdabO2Lcm1ls7YtybWSSSS59lFTnq8+faipz1efPlqKnPV58+1FTnq8+fJJLrTZ2xbk2stnbFuTa2zti3Jtbo3f04J9i5nZEamPsK4pLo2wbtt7vTWS59MzEoWOlrTMShY6Wklz8PEbYJKYAbnYY8FgdCtcyWH3EI5E2HN9xCORNhyXOwx4LA6Fa5kuhfokiJV00GS59MzEoWOlpJLTMShY6WkuhfokiJV00GSSWG4TfVnMCbcJvqzmBLbhN9WcwLbhN9WcwJbcJvqzmBLnY+85UiQZUpb3nKkSDKlJYG4TfVnMCJdC+kcG+4Y9stSODfcMe2XNxxwbSeDL2XNy6dODPdz6pJJLCdLyOrAioktOl5HVgRVz7ultcsuJ3kkl0bsl+P9BB4kuhZtpdeDGBS6F9/6WHcIbJJZkvx/oIPEl0LZGbL+mLngLftIzZf0xc8B0jNl/TFzl0L/UkMAGrbSSSWZL8f6CDxLZL8f6CDxbJfj/QQeJdC+6q01qmjEkklzsdKzCAxSsUtpWYQGKVit3MU5BmXyliJeKJtHI8kks9H3goTte3o+8FCdry3o+8FCdr29H3goTteXQviXiibRyPJLeaMpHsCQFvNGUj2BLeaMpHsCS3mjKR7AlvNGUj2BJbzRlI9gSSSXRu0uvhH+2y2l18I/22S2l18I/22W0uvhH+2yW0uvhH+2ySSSS52H18ZprwH9vr4zTXgP5b6+M014D+SSSSSXRuii6kmXyCSSS0UXUky+QS0UXUky+QSXNy0u9Sg9bxrZzy5yJx+gl0L+SxCJdwZS3JYhEu4MrcliES7gyluSxCJdwZSSSSS3JYhEu4MpbksQiXcGUktyWIRLuDKSyLVs+rG4paLVs+rG4rRatn1Y3FLRatn1Y3FJaLVs+rG4pJJc/CxW6lPyeuSWhvJm7oR4Ekkl0L8VpabU7JWxWlptTslLYrS02p2SksWdn1v2KcS6NfcU2S5Ky3cU2S5KyS6VlnZ9b9inLWdn1v2KcktZ2fW/YpySWs7PrfsU7Wdn1v2KctZ2fW/YpyXPw8jCvW8FNtk3WHmchTJY7imyXJWQW7imyXJWW7imyXJWSSW7imyXJWSS3cU2S5KyW7imyXJWW7imyXJWS3cU2S5KyS6dV0H/Ok0skuhZXSD4UAdy6Ft3YTNCVqtd2EzQlapa7sJmhK1YAA", // nolint:lll
+ "FiYqRnIdAACAQEAAAAAWlSx4Dadc6Q14AAE4hgAGQBgAGP9gAGTpgAGFcMxyJvHg8gDyAvFs8e7yAPIC8f7yAPFu8fLyCvH28gL2yu0G8gDyAPII8f7yAPB88YryAQHx9X7yEQHx+vH68U7x+vH+8gLyAPGQ8YTyAPIA8fzyBPDC8iLyAvII8e7yBvGw8ijyCvHw8gTyAvHi8ezx4vIA8hDx9vHw8e7yAPH68gDyBvGA8cDyAPIA8gDyCvE68PTx+vIK8fbyCPBE25jqmPIW8fbyAPCk8gTx/vIC8gjx9vAe7VrpJPIG8gDx+u/i8gjyBvH48g8W8ftU8S8W8WTyAvH+8grx8PD+8erx9PH88f7yCPEu8fryAvH48gbx/Pco7K7x+PIG8gD26u0E8gDyAPIA8grx9vIK8cbyAPIA8gL3GuoU8cDyEPH28gDx+vF08dzx+vIG8gDyAPF88ibx1vIK8fzyAPE68b7yAvH+8fryBvH48g7x+PH88jTx0vHi72bx8PH+8gLyBvA48fzyAvIA8f7yAPIW8eryAPIA8gDyBvGy8fLx+vII8fjyAPHc8EryBvHy8gLx7PHe8XTyAPH68gbyCu+28fTx+PIA8gbyAPCY8fLx/PIE8gbx9PGU1/zt7PH48hLx7vHE8gDyAPIG8gDx+vVU7Sjx/vIC8f7x+vGC8frx9vIE8gLyAPEc8hLx+PH68gjx/vH68f7x+vIG8gDx+vD48fryBPIA8grx+PEi8fjx+PIG8grx9vCY3tTx+PIA8grx8vHu8ezyAPH+8gDx/vJg8PryAPIC8fjyAPE68aLyAvII8fjyAPHY8YzyAPIA8gDyAPDk8fbx8PIG8fryBvEo8ijx9vH88gzyAPHA8ijx6vIU8gDx+vFRAfHvfvH/AfIK8f7yAvHy8gzyAPIK8fDyDvDo8WbyHPIE8fjyDvDq6hbyAPH68gbyAPEw8fryAPIA8grx6PHK8fLyAPIG8gbx9PFC8fLyAQPyAXvyAQHyCwHxauyQ8gDyAPH48gjyCvHw8hDx9vH68gbwqOlO7NLx+PII8fDxZvGy8e7yAwHyAX7yBwHyAPG+8fTyAvH48g0B8OV+8cMB8gDx+vIG8frx0vGU8gbx+PII8gDzAPAo8f7yCPH88fjw3vGM8gTx8vIG8gDyHvHI8fsB8gd+8gMB8gjw4PH28fryAPIG8gjxIvIU8fbx+vIG8gDw1uxE8fryBvIC8f7xpPHY8gbyAPIA8gEC8Mt88UcC8hLx+PIBBfIHdvEhBfHQ8gDx+vIC8gTxkPH+8gLx/vIG8fsB8el+8WkB8fDyCvIQ8fbx/PHw8gbyAvH+8gD1bO0s8gLyBvIA8gTxavIA8gDyAvII8fDxKPEM8fDyAPIG8fzw0vHq8gLx/vIA8gD2Ju0W8fryEPHw8gbxgPH28gbyKPHO8gzxzN0s6q7yBvII9tTtDvIG8gbyAPIC8f7xaPIA8fDyEPH28grx+PFo8fryDPH68frxhPIC8gbyAPIB4AABWoM1ggP///qWNF28AAArU2PFsgDY/WyCNj/bWtAdYnv2yCQHY/myCQXbebtrM0F2OdsfDZAGyANkDbEU2LZsgDY/WyCNkBbH+2PlsgbZAGyCtsKa1u2P1sgbZAGyAtj8bGu2PtsgDY/WyANkDgOx237Y+IDsgDZBmx/Ntuaq+zvVsirY+2yAtkAbFC2PhsfbZAGx+tkAbE42QBsf7ZAGyCtj7bKu2JhsgbY/WyBtj9a5O2QdsfzY/2yCNj9bGu2O1sfzY/2yBtkAbG02MZsgDZA2x+tkDbHo2QBsgbY/WyBtkAbGm2CxsgrY+2x+tj/bFk2EVsfrY/2yCNkBbIg2O9sfrZA2x+tkAbEG1UlsfrZA2yANkBbIUztpsQzZAWx/tj9bE22QNsgbZBWx9tkBbEs2N9sgLY/2yANkDbHO2QdsgrY+2yANkFbFlAdjrv2yAQHZAGyCtj+a9W2QJsgDZAWx/tkAbGw2QFsgbZAGx+tkDbHu2PZsgDZAGx+NkEbGq2FVsgjY/Gx+QXZA7tseEF2QlsgDZAGx+tkWguy7XbYcoLsfkAA=", // nolint:lll
+ "FiYqRnIdAACAQEAAAAAnPgFYA+AABOIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJPFcvVHPFcpJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJPgAAGhz4AABno////L/JJJJJJJJJgAA==", // nolint:lll
+ "FiYqRnIdAACAQEAAAAAWlSx4DYa+fHfgAATiGAAgoYABJ1gABamAAJ9DLWSWyU46nCsw5GX05SG/Y2OWyCwqmgq2cGuK2q25IwqGLg1gmuS2uWlAypm0+2QIDrJX7YLgOxGyh2lo3k2/a1eW3Gwqmdq3BWvU4I2/6w1GXS3eXH01VGm0woGYu0jXEC02G1mxlGTY47Gyi18WpMwbGjW2zm0Kz8Wx2wy2fY1wmoO1wHGiwy2XO3fm3O1RmzEwKWCAzp2382R2/cwj2fw21nE81KWtmwVGGoxKG5Y0/moSxTmVg1lmri2P4tk9VTEhi2RA1OG7c1vmsKxnmWYzy238202rgwCmuS2C2wU0h252xbWQw4EWtO4dHA4wNWhM3hWc81fG+oyLGqS26G5s2dWpIxTmZK3/m0I2129CwX2kwzgGxY1EnBMxKmtG5Bmxy2D2rKxa2fS1GW1+11G64xNmmW1DG1e1Im86wImlA1rWpg2k20gwPGbO2SXI625nCQwqWgM1BXAw2nHMOxQmqA1x2ps2+2qUxbWZU2tGvi2s2tiwsmYo1Hmqo3bW6swhmSC2dWnY0rW7owRWgW2CWi62Qm5kwPmGKzyWuC1UHPAw5m9M3jm524Vmk8w7WSe2XHDC4TW7KwbGqE3BGsG1S3AQxpWXEz6Gto1MmyGwbmZU2H2+419mxMwHWcAzWHCO3AGsWxNWm82jW3W1KmxEwLmJQ2825wyzGsQw0Gae3v2oI0sm5ywa2Ue1ymve4FHEoxRWfQ38m2I2vWpmxI2ee1mWoe1R2yQw9Wos1AGvs0nW7gyAWZu2BGfg1+2sGxiGkW2GXlg3IHCqx4IDoyX7e5gOsg0/mruwQGhG1eW8c3Gm8Kws2ZS4ZnCy30WjKwX2UI2Bmui1oHCeyRmxk19mrI1W25Uw5WWo2HWvm2u2nuwrmr8zXoHqg3vgygOzlAcD9ikNTRxANflu2sYFjFtSJqRNJhrmsH9hTMnBtvNW1vKMK9i7t0huMQHWYv3DvAcXpqktctxBuGhruwHCxv2PPAdTRr0NHFtbsM1kytcZsaN7ZsNMN9oYNThsdtw9o9sLplmuHBrUuRttnMbln6tX+A6F9+0dIDwvMDJkWt4Rlwt2ZyDsklloN21r8tahndMIBq2OChq7thBu5sbRnzNhNvCtmpqrQLCTvmiLAtlxvXtqCC7bl2wQILovtoBpUNWFshsWtptNt9yCNqpsAwHBIv2n/AdmdpktyJtFMHBrANk9xRtWRsOMLpmQtt1qhNUBvqsO5lstDVqktmluDsfdq2NvNrVtw5sPsUFkrNWRyXumxscsRhiotmVoBtmVsgsEFmHtmtspNwBsSsGphKtDptKOctuOsV9mht69tFNpFqIMexlUtbNrONLJx6Mq5ldti5tQNAxqYMvhlAte5rCtczwAACtTYy///+pYxJvAAAK1Mc9wJtrlohuG5hLwHN6v2s9AdtpuNQXXtu2CTBdR1vgtkJtutEVhQsn5t+NV5jutQliKNGVwftxdsANuhgKtExtlNDlp7N1RhOMstuCNetr9uR6A4p1+ynIDs4NI1ubOB9gmMH9rttN9qGNwRgWM3pqOtdZo/OWlhvNI5stNhNr7txZjRsvtqgN1ZpcNUBjhseRwKt7lytNzBgStANl2NQBu0NLxh6NQJtMtmlqkuEhjstlBsDuHZqkNixhEs7lqnNppt5NuNiXMPxt4NyNvDNb9hyNU9rztxhsKNUNhAsi9rMtuNvINqdgMMM1lFuM5u5NlVgUs+BpAtydqyNstk4MlppstXZtTt8hhSs0NrBtcxqWNQtiOQHV3v2ihAdVRpONw1lLtZhrJNzRrLtntjRMlJrGNc5qCtF1izMtZtAtOdtoNhJhEMrRrxtFVv0QXW4u2MnBczBsfuYtrrtLmC4oN21nYLoqQAA==", // nolint:lll
+ "FiYqRnIdAACAQEAAAAAPGEnQDAfgAATiCSSSSSSSSSSSSSSSSSSSSSSSfAAAnEH///2PBJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJMBb8wFJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJgAA==", // nolint:lll
+ "FiYqRnIdAACAQEAAAAAarE7gA+AABOIJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJMAAA==", // nolint:lll
+ "FiYqRnIdAACAQEAAAAA94IfIDfYY0GwPwAAJxDAAH592AASFBMAAeXVYAA/OkGiuzWt4Dtu8tNrDNvACt9dFxQKt9lRx22OG9JwIlN+YFwUkN241uCOOBi1tzUN8F1u9zOC6pv9fNrUVvQot5dNv2vtzelurVuBihvh2t6cdv+Pt3lRwKeOKG1uHjN/bCA79Rd+3xD4DvJgtxP9vwEt8rlut/t5p9vjCOLHdwTTN0k1u2QN5j1wVWN5/RxHnt0OVuQkN7KFwsetm9xxfeN4pBuxpOBGtuE8NzABwc2OHS1wc9OAtZvmPt6N1wXJNtN5vjWt+DVsstt45Bv98t2w5xRTN2UxvLnt5/hvD3ODCJvWluAQBxadOBxhv/AOAGVxGgODJpvXcOB3VwapN7xJxN4uH7pvK5OI1Rt2/uAoiB704V631N4HwsjN6gJwmkuMEZwKZN7G9ujTNz7lwnkN6AtvLwN4Xhvv5t7ZJw6UtvS5w8xN/O5u6Stxi5wU5N25VwkaNy4xu3cOAEZwK6NvnBvWZN8KFuBXN6mFwvqN9/Zw5EOEZ5vJBN5yBw9IuTW5ueOt9BRv/aOCLVvxst99Jv6dtvNNu5juA5VwxVN8lJuYtt56tvwlt+BJw9vt79dwAqN5PhuyMNnopuzcN8WpvbKN6MZuaBNx09yyzt1blxItt6lpw50OEGtxZSN2LBwvat8BVvv+tyM5xGxt5xNwjSuGR1wV8OEuxv0Stz1Bx30ODlNvu0t14pvqzN66Rv85t2WFvhzuBBBvJgN6G9vlBNz4hv06N77NvFOt2/hvWZuD1ZuzmNu25vqEOR1Vu1StzdFv4SuKDluEON4mlu6juC4JvPTORxVvgdN8G1v7fN6epvTsN7pltzKuOeRuwyODvVw0tOB71vxGt+hVvYvt8plxfzN+UNwBoOJgtxZCt9R5woct6gpxBNt9U9wRdt4P1vkeNsP1wmzN43VvlYNzbtuwxt0jJuqQOG0hvzwN27Rv2uuGQNuaHOIeVvWVOEtBtgHN8NtybruAM9ws9N/s5w2ot711wEzN5n9w6ZN84JvaetoexwmhOI5puccNv1VuLaOBlRxAft04VupXN6d9v9ktvchv87tswVvieN4EBvVvuJ99vTvOHhlv1SOH81u7jNwFtuABN1lFxUZN0GNxMzN5PtwGmN9T9wE9t9plup6t/QRwV6OOltt5PN3Iluszt+elwZlt95BwmOuK1tvzaOC1duCANyk5wHfuD5RvT9wTeFLvG8BtBOCqRwZ4uKQBwszt/ZBv/pt7AZvDFtq/xtwSN9PaA7jy1+4nOIDuXWt3fJvnwNsoRvILNsG1v8UOBWFvlct9utwqMOKOxujKN4tZvmAtwhhvjgOMWhvQoN1xRu/Ct629uXtt3EhwAhN7CBvgwNwPhwpWttsBwexNwj1xGPN9sRu00N+Ydu4Zt/OtwFht9rxwzktwZ1w8vNyHZwzLtwLVuseN6hRu69t/ptvMct5tZuv/uEstwa8t/lluvTOEmpws3N9TRumCuAUhtc8NzvlvhCt7ftvYvt4YBurmt2gJv3jN1idv4eN6cxve9N2MVuliuPUpvg5t8V5vjxtuWJvE0t9ZFvKQOB91wsYt9CqA73Gjcd3vu7tvAt1SRwoHtu19xTbOTKdtsPuKkBvdft6ChxqHN8WdugRNy9VvEbOErBwBVt7kFwNpuHixvF9t8xNvjyt2KtuUltondvSftzpttyLt3zNu8nN0QZvEhN4WZxNrt2Jtt2NN3AtvoNNyUlvWouN1Fw+iN/ZBvs8tl65xiDtz1Zwm0NyxduGSOBsJua9t4otvEjt1G1vYVNyQduiFOCxdw+KuGLtuyrN6s9wTztyHhvy0uKfJueHuMZpvIHN0pZxU1N7KhuliuBtxwFgt5PJty0QLeb2vm8RtAt615v8TN2IdwJ4tzWBtHFuIYdvYwuATlx6ptxC1wD7t8D1v0RN9w5utAt8d9wmJN4kVxPrOGGVvM1t/BVvRON3cZyJiN3ltv5QN/FtvLLtrYBw3ZuHj5wiwt/v1usHN5e1wu+N3Wdw1qN9D1w8Yt52VwkQt4iZxJctvfVvvqOLyhxIBNyPRujWuD6htvQt9StvI+t/utw2Vt8ppxLAttYpvjeN/ShvpXNvChvfst7cVvuTuHlRt/aN53hv6JN44RwyYynfEh2u3sVZTvzIuAd5u7CON7BvBiNyMJvoGOQHBuneuDzltU1t4hVwDqN2yBwJ8N88Bu49t1mdvNfOBbZwS3uPfJwaNNzzdwPIN63xukQN+ItviCwHesev21zxAeDhRwiLtqQJuoFt7cFw2QtsBhwYMuPyVuklNtDVwfwt8P5wZxNzNpu+RtkcVwBLNp+Vvmzt/8Rwo/OIq5uKcNtLJuQxuBhxvMLN/r5wl1Nyzpts7tseZvmDuGLRt5LN8FhwlmORGxvqtOBPRvkIt6FBu64t8MdwSbt8S1woEt0Ctt7otuI9wyut0GRwNiOFppuoxuBAFwJAOKkZuNrOBqtt+KOA3NxaYN5WdtXxOCzdwrNN4uBwyzN9qlwjkN5CBwBHOQJFtnlOD6pw5tt2K9xl3ONI1wx+uC8mAA", // nolint:lll
+ "FiYqRnIdAACAQEAAAABGId9oDfcTD+KTwAAJxDAAJ9nmAAgRqMAAyOLYABNsmGkqKtGlSxai45t0OWyDGakfNqCDmpc7a1FhtxUW0JAagCBq4kWiuPaWIVuiqWx1DaZ8dpS42tOuau7NtH526t8adBRqMy2iVZabJtuBPoDuMH79pdk4DqL6Gn02aggVucnYFt0QL7qE0Wme4gOlusabFdtjBmuwRado5pei4Dp9lr9qEkIDuFg2yO4aVwppYX2oPOaYWFtkKm2OWaT3BpbpmnylanWZuI5m2XBab15puEWrJ4aSDBtt7W8VuayVFqRSGo68aX9RvhI22xIaNUVqXlmneXaOlpuUQGsIqaWoNqHvGlF1afUFtA8mwQ3aoENp6pWqtIay+1ssXG1j2aiLFqnZ2qTXaYa5uSFm9nYaW/tqnLmsU0ay+Rt2sG+vCaK2dqG4movralhdt+vm6W+aro9psSGi3/aXIZv2A21//aitVqG0YJps07xp5hIJtyDGq4lahdZrPB2omPabEhs3HG0nQacY9re2IDo9xb9qOc4DtrTm1Q3aNUVqrHWocCabaxtp7WtdEaYflq4FYDpn5L9pt04DuSY22gTawH9pMP2ng1gOqgMv25S4gO1kgaiLdpf62l0xaaTtu0ZG2Jwal7RpDT2lwaajbWA7kwq/bk5OA6Y8xqtWmnWOaaYls0J2xFVgOrKov2l0bgOlylaUGVvBlm5XIaiDlqyj2mHyareRuR92we7aPPJq44Wm0pakR9sznG6g3amm5qZ+2rCuarOVvZnG4NoaP19rOYGoXpameRt1bG05SafENpkjWmimafb5uFwW0Acagj9qIY2kdhap3tvQWmwEKadOFouV2sIraX+1tEcWwZ7a6+dqePmkkfgOk4dv24fIgWpYDv2nBvgOqscabjlqFd23mfgOylbv2oj2gWokrv2l5OgOpn4cAMxsNxWquhamiNqY+Gr6ubqCRsSsmpuvakdpp7aYDqnmr9vStYDrdPmqk6gOozNv2qYLgOsQxbwpNvfsGoc6alFFpT+mpnubhB1tEE2lTSav/hoYVWky0b72tuuammghak85qbDGqJBbbN9uOgoDqZW79qbJYDqg0mjIwbmVJtKpWrszgOpmuv2oi4gOnmbbYyFtYa2o6Yawi5pjvGojnbbG5ub/2pycafIxpZY2mJBgO/Szv2vr6gOmuzgOk8tv2nmBgOoEibcKdsfzmkSHahcVqN5Wicub5jxuoi2pqIgOrbpv2oM/gOn7abgLtuBN2nunazp1o0/GtrEbcRZt/0oDqkib9pHMIDqHxGmRvbsCtq88GrROaN2SA6Y0m/aVMeA7tBxuQvWoKMas3hrPTmojKbqsRq202lpxartdqZKYDqjr79s/7YDvOymnglao+5q5qmrC/g23crumwa4g2qgqaehtobo2mKEbwzlsWsGrlRaReNpOkGoYmbMD1vYDGi0zafyNq+32j5ybJT9uW9mrLSaVmlqAbWqzIbic9q1BWqDIahZJpWZ2nNhbkOttxMWjWKaf8hpnAWkA1bcd5stqmjjXal3ppTR4DrJeL9tieYDtL82o3YacAZq3B2plQbgM5thJGljMakTZorqmqSnbiFpr2qWn5ran9pp/qGk9gbgH9sd1mojAawPFpzL2nKDbYfhuYuGqdRaRa5qihGiwWbg21uRDmmFWajuZpnMGmipbmjBt222k90abMVqieWktEbB59vTxWq69aq4xqX22mAdbeXKA60BG/afuaA6TK5qNxGtW/b58dr+B2le2adQhpi1mnpEbLaptC+WxijaTpdqrRGrHbbpUBtuY2p4QaYuKCaXBq8aexeCboedqfCGn2Maq+KCaxO28abA6CbbRhty0mnV5aOa1oqvIDqvsr9sPNoDs532m+QaX7hpa/moolbaBRwqB2kGDaXnBpKHGqRQa+Z5u5UGqqba6w5pycGow2bHhxtVV2lTsaq4Vp3BWkh/bQNNta2WoGgacIVp1DGnfGbPZht5PWsiyaoWRqEwGmdlgO4Aov21sQgOmNBamh5rJv2jKvbtyps7AmqsoaphRqA7WnIlbVyRu4kWmb5aMNJpiJ2oyKa6F9uNi2mfDakyaA6Z7+/aX9qA7VVRsvkmrJuaqwmA6Lda/ayU2A7Xp9r+22oKdal5dqY24Dp0tb9uUtYDtSommY3achJpnNmjiJbYyNusUWlqpamUBqHYWo5/bW79s27mrZmahtFpqEmm8ubhK5ujlGnGHanJdpdYWsXXbza5vQ2mnqgamiFpxZGoLRb5/9uApmmaFacUBpiVmmmbb3dhrWRmnBAapKVpudGre6gOyNuv2uhPgOo3KahcBpgmGndWbcXluzgGpq7abVlp2poDqcFr9tdVYDuIbGmtqakJ9qT02pfvbg/dthuGkrPaV7FqCM2sNHbX9Vt1r4Douu79qKjYDqaPmrwybtK1se0GooVaSj9qDnmrVUb0/eA7AL6/asHmBaSge/6jS6/6hceA7RxdtDQmoK5aefRo/eGqwVborls13mk0/ak8qA6vRW/aS6GA7Z55ukIWmsWaLzlqve2rXJbRSVtZ62modaGH9qWs2jQAbzFxvBWWtgsalnVo/72iaEbSbOBbPYu+agDOBacCpqSLoJporLxsXrIJs8HWuUoaZPtpHzWlefbdO5u58mm7jat7Rpj5Gne3beCdorCmn3OafItqKYmmTTbHw9s2k2q4ZaeXBrIF4FqYcr5t8q4FuKkmm4Aa2Cpol42q7fbsKdtK2GtYGae19ouOWsWEbWTFuNfmpClaiXZqCnWrYgbWVJu9n2kt6apzFrDcmmGAb2lhsT3IDqR3b9qSSYAA=", // nolint:lll
+}
+
+func BenchmarkM3TSZEncode(b *testing.B) {
+ var (
+ encodingOpts = encoding.NewOptions()
+ seriesRun = prepareSampleSeriesEncRun(b)
+ encoder = NewEncoder(time.Now(), nil, DefaultIntOptimizationEnabled, encodingOpts)
+ )
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ run := seriesRun[i]
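+ // Reset the encoder to the run's start time; len(run) is a capacity hint.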
+ encoder.Reset(run[0].Timestamp, len(run), nil)
+
+ for i := range run {
+ // Use index access to avoid copying a 40-byte datapoint.
+ _ = encoder.Encode(run[i], xtime.Nanosecond, nil)
+ }
+
+ encoder.Discard()
+ }
+}
+
+func prepareSampleSeriesEncRun(b *testing.B) [][]ts.Datapoint {
+ var (
+ rnd = rand.New(rand.NewSource(42)) // nolint:gosec
+ sampleSeries = make([][]byte, 0, len(sampleSeriesBase64))
+ seriesRun = make([][]ts.Datapoint, b.N)
+ encodingOpts = encoding.NewOptions()
+ reader = bytes.NewReader(nil)
+ )
+
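+ // Decode the base64 sample blobs into raw M3TSZ-compressed series bytes.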
+ for _, b64 := range sampleSeriesBase64 {
+ data, err := base64.StdEncoding.DecodeString(b64)
+ require.NoError(b, err)
+
+ sampleSeries = append(sampleSeries, data)
+ }
+
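+ // Decompress a randomly chosen sample series into datapoints for each of the b.N iterations.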
+ for i := 0; i < len(seriesRun); i++ {
+ reader.Reset(sampleSeries[rnd.Intn(len(sampleSeries))])
+
+ iter := NewReaderIterator(reader, DefaultIntOptimizationEnabled, encodingOpts)
+ for iter.Next() {
+ dp, _, _ := iter.Current()
+ seriesRun[i] = append(seriesRun[i], dp)
+ }
+
+ require.NoError(b, iter.Err())
+ iter.Close()
+ }
+
+ return seriesRun
+}
diff --git a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
index 5d056e9750..ecc8e13428 100644
--- a/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
+++ b/src/dbnode/integration/commitlog_bootstrap_index_perf_speed_test.go
@@ -33,13 +33,13 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/integration/generate/options.go b/src/dbnode/integration/generate/options.go
index 186f5dc7f6..fec5b7d7de 100644
--- a/src/dbnode/integration/generate/options.go
+++ b/src/dbnode/integration/generate/options.go
@@ -24,9 +24,9 @@ import (
"os"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
+ "github.com/m3db/m3/src/x/clock"
)
const (
diff --git a/src/dbnode/integration/generate/types.go b/src/dbnode/integration/generate/types.go
index 118dd867f7..f716fc412e 100644
--- a/src/dbnode/integration/generate/types.go
+++ b/src/dbnode/integration/generate/types.go
@@ -24,11 +24,11 @@ import (
"os"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
ns "github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/sharding"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
)
diff --git a/src/dbnode/integration/integration.go b/src/dbnode/integration/integration.go
index 31883a58fb..a5837fa673 100644
--- a/src/dbnode/integration/integration.go
+++ b/src/dbnode/integration/integration.go
@@ -272,9 +272,10 @@ func newDefaultBootstrappableTestSetups(
SetAdminClient(adminClient).
SetIndexOptions(storageIdxOpts).
SetFilesystemOptions(fsOpts).
- // DatabaseBlockRetrieverManager and PersistManager need to be set or we will never execute
+ // PersistManager needs to be set or we will never execute
// the persist bootstrapping path
SetPersistManager(setup.StorageOpts().PersistManager()).
+ SetIndexClaimsManager(setup.StorageOpts().IndexClaimsManager()).
SetCompactor(newCompactor(t, storageIdxOpts)).
SetRuntimeOptionsManager(runtimeOptsMgr).
SetContextPool(setup.StorageOpts().ContextPool())
@@ -285,13 +286,14 @@ func newDefaultBootstrappableTestSetups(
persistMgr, err := persistfs.NewPersistManager(fsOpts)
require.NoError(t, err)
-
+ icm := persistfs.NewIndexClaimsManager(fsOpts)
bfsOpts := bfs.NewOptions().
SetResultOptions(bsOpts).
SetFilesystemOptions(fsOpts).
SetIndexOptions(storageIdxOpts).
SetCompactor(newCompactor(t, storageIdxOpts)).
- SetPersistManager(persistMgr)
+ SetPersistManager(persistMgr).
+ SetIndexClaimsManager(icm)
fsBootstrapper, err := bfs.NewFileSystemBootstrapperProvider(bfsOpts, finalBootstrapper)
require.NoError(t, err)
diff --git a/src/dbnode/integration/large_tiles_test.go b/src/dbnode/integration/large_tiles_test.go
index b8a32739ec..304077171c 100644
--- a/src/dbnode/integration/large_tiles_test.go
+++ b/src/dbnode/integration/large_tiles_test.go
@@ -42,15 +42,19 @@ import (
"go.uber.org/zap"
)
-var (
+const (
blockSize = 2 * time.Hour
blockSizeT = 24 * time.Hour
+ testDataPointsCount = 60
+)
+var (
gaugePayload = &annotation.Payload{MetricType: annotation.MetricType_GAUGE}
counterPayload = &annotation.Payload{MetricType: annotation.MetricType_COUNTER, HandleValueResets: true}
)
func TestReadAggregateWrite(t *testing.T) {
+ t.Skip("flaky")
var (
start = time.Now()
testSetup, srcNs, trgNs = setupServer(t)
@@ -67,19 +71,18 @@ func TestReadAggregateWrite(t *testing.T) {
session, err := testSetup.M3DBClient().DefaultSession()
require.NoError(t, err)
- nowFn := testSetup.NowFn()
+ nowFn := storageOpts.ClockOptions().NowFn()
// Write test data.
- dpTimeStart := nowFn().Truncate(blockSizeT).Add(-blockSizeT)
- dpTime := dpTimeStart
+ dpTimeStart := nowFn().Truncate(blockSizeT)
// "aab" ID is stored to the same shard 0 same as "foo", this is important
// for a test to store them to the same shard to test data consistency
err = session.WriteTagged(srcNs.ID(), ident.StringID("aab"),
ident.MustNewTagStringsIterator("__name__", "cpu", "job", "job1"),
- dpTime, 15, xtime.Second, annotationBytes(t, gaugePayload))
+ dpTimeStart, 15, xtime.Second, annotationBytes(t, gaugePayload))
- testDataPointsCount := 60
+ dpTime := dpTimeStart
for a := 0; a < testDataPointsCount; a++ {
if a < 10 {
dpTime = dpTime.Add(10 * time.Minute)
@@ -138,7 +141,7 @@ func TestReadAggregateWrite(t *testing.T) {
log.Info("waiting till aggregated data is readable")
start = time.Now()
readable := xclock.WaitUntil(func() bool {
- series, err := session.Fetch(trgNs.ID(), ident.StringID("foo"), dpTimeStart, nowFn())
+ series, err := session.Fetch(trgNs.ID(), ident.StringID("foo"), dpTimeStart, dpTimeStart.Add(blockSizeT))
require.NoError(t, err)
return series.Next()
}, time.Minute)
@@ -203,8 +206,8 @@ func fetchAndValidate(
func setupServer(t *testing.T) (TestSetup, namespace.Metadata, namespace.Metadata) {
var (
- rOpts = retention.NewOptions().SetRetentionPeriod(500 * blockSize).SetBlockSize(blockSize)
- rOptsT = retention.NewOptions().SetRetentionPeriod(100 * blockSize).SetBlockSize(blockSizeT).SetBufferPast(0)
+ rOpts = retention.NewOptions().SetRetentionPeriod(500 * blockSize).SetBlockSize(blockSize).SetBufferPast(0).SetBufferFuture(0)
+ rOptsT = retention.NewOptions().SetRetentionPeriod(100 * blockSize).SetBlockSize(blockSizeT).SetBufferPast(0).SetBufferFuture(0)
idxOpts = namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(blockSize)
idxOptsT = namespace.NewIndexOptions().SetEnabled(true).SetBlockSize(blockSizeT)
nsOpts = namespace.NewOptions().
@@ -215,7 +218,7 @@ func setupServer(t *testing.T) (TestSetup, namespace.Metadata, namespace.Metadat
SetRetentionOptions(rOptsT).
SetIndexOptions(idxOptsT)
- fixedNow = time.Now().Truncate(blockSizeT)
+ fixedNow = time.Now().Truncate(blockSizeT).Add(11 * blockSize)
)
srcNs, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
diff --git a/src/dbnode/integration/setup.go b/src/dbnode/integration/setup.go
index cf02d7c939..19d391acf2 100644
--- a/src/dbnode/integration/setup.go
+++ b/src/dbnode/integration/setup.go
@@ -34,7 +34,6 @@ import (
"github.com/m3db/m3/src/cluster/services"
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/integration/fake"
"github.com/m3db/m3/src/dbnode/integration/generate"
@@ -56,6 +55,7 @@ import (
"github.com/m3db/m3/src/dbnode/testdata/prototest"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
xsync "github.com/m3db/m3/src/x/sync"
@@ -380,7 +380,8 @@ func NewTestSetup(
if fsOpts == nil {
fsOpts = fs.NewOptions().
- SetFilePathPrefix(filePathPrefix)
+ SetFilePathPrefix(filePathPrefix).
+ SetClockOptions(storageOpts.ClockOptions())
}
storageOpts = storageOpts.SetCommitLogOptions(
@@ -394,6 +395,10 @@ func NewTestSetup(
}
storageOpts = storageOpts.SetPersistManager(pm)
+ // Set up index claims manager
+ icm := fs.NewIndexClaimsManager(fsOpts)
+ storageOpts = storageOpts.SetIndexClaimsManager(icm)
+
// Set up repair options
storageOpts = storageOpts.
SetRepairOptions(storageOpts.RepairOptions().
@@ -931,6 +936,7 @@ func (ts *testSetup) InitializeBootstrappers(opts InitializeBootstrappersOptions
if err != nil {
return err
}
+ icm := fs.NewIndexClaimsManager(fsOpts)
storageIdxOpts := storageOpts.IndexOptions()
compactor, err := newCompactorWithErr(storageIdxOpts)
if err != nil {
@@ -941,6 +947,7 @@ func (ts *testSetup) InitializeBootstrappers(opts InitializeBootstrappersOptions
SetFilesystemOptions(fsOpts).
SetIndexOptions(storageIdxOpts).
SetPersistManager(persistMgr).
+ SetIndexClaimsManager(icm).
SetCompactor(compactor)
bs, err = bfs.NewFileSystemBootstrapperProvider(bfsOpts, bs)
if err != nil {
diff --git a/src/dbnode/namespace/kvadmin/ns_admin.go b/src/dbnode/namespace/kvadmin/ns_admin.go
index 672ef55a62..f5ba819c4a 100644
--- a/src/dbnode/namespace/kvadmin/ns_admin.go
+++ b/src/dbnode/namespace/kvadmin/ns_admin.go
@@ -24,17 +24,18 @@ import (
"errors"
"fmt"
+ uuid "github.com/satori/go.uuid"
+
"github.com/m3db/m3/src/cluster/kv"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
"github.com/m3db/m3/src/dbnode/namespace"
xerrors "github.com/m3db/m3/src/x/errors"
-
- "github.com/satori/go.uuid"
)
var (
- ErrNotImplemented = errors.New("api not implemented")
+ // ErrNamespaceNotFound is returned when a namespace is not found in the registry.
ErrNamespaceNotFound = errors.New("namespace is not found")
+ // ErrNamespaceAlreadyExist is returned when adding a namespace that already exists.
ErrNamespaceAlreadyExist = errors.New("namespace already exists")
)
@@ -154,8 +155,58 @@ func (as *adminService) Set(name string, options *nsproto.NamespaceOptions) erro
}
func (as *adminService) Delete(name string) error {
- // TODO [haijun] move logic from src/query/api/v1/handler/namespace here
- return ErrNotImplemented
+ currentRegistry, currentVersion, err := as.currentRegistry()
+ if err != nil {
+ return xerrors.Wrapf(err, "failed to load current namespace metadatas for %s", as.key)
+ }
+
+ nsMap, err := namespace.FromProto(*currentRegistry)
+ if err != nil {
+ return xerrors.Wrap(err, "failed to unmarshal namespace registry")
+ }
+
+ metadatas := nsMap.Metadatas()
+ mdIdx := -1
+ for idx, md := range metadatas {
+ if md.ID().String() == name {
+ mdIdx = idx
+
+ break
+ }
+ }
+
+ if mdIdx == -1 {
+ return ErrNamespaceNotFound
+ }
+
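+ // Deleting the last remaining namespace removes the registry key entirely.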
+ if len(metadatas) == 1 {
+ if _, err := as.store.Delete(as.key); err != nil {
+ return xerrors.Wrap(err, "failed to delete kv key")
+ }
+
+ return nil
+ }
+
+ // Replace the metadata at the found index with the last element, then truncate.
+ metadatas[mdIdx] = metadatas[len(metadatas)-1]
+ metadatas = metadatas[:len(metadatas)-1]
+
+ newMap, err := namespace.NewMap(metadatas)
+ if err != nil {
+ return xerrors.Wrap(err, "namespace map construction failed")
+ }
+
+ protoMap, err := namespace.ToProto(newMap)
+ if err != nil {
+ return xerrors.Wrap(err, "namespace registry proto conversion failed")
+ }
+
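+ // CheckAndSet with the previously read version guards against concurrent registry updates.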
+ _, err = as.store.CheckAndSet(as.key, currentVersion, protoMap)
+ if err != nil {
+ return xerrors.Wrapf(err, "failed to delete namespace %v", name)
+ }
+
+ return nil
}
func (as *adminService) ResetSchema(name string) error {
@@ -239,7 +290,6 @@ func (as *adminService) currentRegistry() (*nsproto.Registry, int, error) {
return &protoRegistry, value.Version(), nil
}
-
func LoadSchemaRegistryFromKVStore(schemaReg namespace.SchemaRegistry, kvStore kv.Store) error {
if kvStore == nil {
return errors.New("m3db metadata store is not configured properly")
diff --git a/src/dbnode/namespace/kvadmin/ns_admin_test.go b/src/dbnode/namespace/kvadmin/ns_admin_test.go
index 29b0dc073e..a782f20474 100644
--- a/src/dbnode/namespace/kvadmin/ns_admin_test.go
+++ b/src/dbnode/namespace/kvadmin/ns_admin_test.go
@@ -21,16 +21,17 @@
package kvadmin
import (
+ "errors"
"testing"
"github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+
"github.com/m3db/m3/src/cluster/kv"
+ "github.com/m3db/m3/src/cluster/kv/mem"
nsproto "github.com/m3db/m3/src/dbnode/generated/proto/namespace"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/x/ident"
-
- "github.com/stretchr/testify/require"
- "github.com/m3db/m3/src/cluster/kv/mem"
)
const (
@@ -61,6 +62,9 @@ message ImportedMessage {
bytes deliveryID = 4;
}
`
+
+ nsRegKey = "nsRegKey"
+ testNamespaceID = "test-namespace"
)
func TestAdminService_DeploySchema(t *testing.T) {
@@ -68,8 +72,7 @@ func TestAdminService_DeploySchema(t *testing.T) {
defer ctrl.Finish()
storeMock := kv.NewMockStore(ctrl)
- var nsRegKey = "nsRegKey"
- as := NewAdminService(storeMock, nsRegKey, func() string {return "first"})
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
require.NotNil(t, as)
currentMeta, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
@@ -83,7 +86,8 @@ func TestAdminService_DeploySchema(t *testing.T) {
protoMsg := "mainpkg.TestMessage"
protoMap := map[string]string{protoFile: mainProtoStr, "mainpkg/imported.proto": importedProtoStr}
- expectedSchemaOpt, err := namespace.AppendSchemaOptions(nil, protoFile, protoMsg, protoMap, "first")
+ expectedSchemaOpt, err := namespace.
+ AppendSchemaOptions(nil, protoFile, protoMsg, protoMap, testNamespaceID)
require.NoError(t, err)
expectedSh, err := namespace.LoadSchemaHistory(expectedSchemaOpt)
require.NoError(t, err)
@@ -115,14 +119,14 @@ func TestAdminService_ResetSchema(t *testing.T) {
defer ctrl.Finish()
storeMock := kv.NewMockStore(ctrl)
- var nsRegKey = "nsRegKey"
- as := NewAdminService(storeMock, nsRegKey, func() string {return "first"})
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
require.NotNil(t, as)
protoFile := "mainpkg/test.proto"
protoMsg := "mainpkg.TestMessage"
protoMap := map[string]string{protoFile: mainProtoStr, "mainpkg/imported.proto": importedProtoStr}
- currentSchemaOpt, err := namespace.AppendSchemaOptions(nil, protoFile, protoMsg, protoMap, "first")
+ currentSchemaOpt, err := namespace.
+ AppendSchemaOptions(nil, protoFile, protoMsg, protoMap, testNamespaceID)
require.NoError(t, err)
currentSchemaHist, err := namespace.LoadSchemaHistory(currentSchemaOpt)
require.NoError(t, err)
@@ -163,8 +167,7 @@ func TestAdminService_Crud(t *testing.T) {
defer ctrl.Finish()
store := mem.NewStore()
- var nsRegKey = "nsRegKey"
- as := NewAdminService(store, nsRegKey, func() string {return "first"})
+ as := NewAdminService(store, nsRegKey, func() string { return testNamespaceID })
require.NotNil(t, as)
expectedOpt := namespace.NewOptions()
@@ -176,6 +179,7 @@ func TestAdminService_Crud(t *testing.T) {
require.Error(t, as.Add("ns1", optProto))
require.NoError(t, as.Set("ns1", optProto))
require.Error(t, as.Set("ns2", optProto))
+ require.NoError(t, as.Add("ns3", optProto))
nsOpt, err := as.Get("ns1")
require.NoError(t, err)
@@ -189,5 +193,174 @@ func TestAdminService_Crud(t *testing.T) {
nsReg, err := as.GetAll()
require.NoError(t, err)
+ require.Len(t, nsReg.Namespaces, 2)
+
+ err = as.Delete("ns1")
+ require.NoError(t, err)
+
+ nsReg, err = as.GetAll()
+ require.NoError(t, err)
require.Len(t, nsReg.Namespaces, 1)
}
+
+func TestAdminService_DeleteOneNamespace(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ currentMeta1, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
+ require.NoError(t, err)
+ currentMeta2, err := namespace.NewMetadata(ident.StringID("ns2"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ currentMap, err := namespace.NewMap([]namespace.Metadata{currentMeta1, currentMeta2})
+ require.NoError(t, err)
+ currentReg, err := namespace.ToProto(currentMap)
+ require.NoError(t, err)
+
+ expectedMeta, err := namespace.NewMetadata(ident.StringID("ns2"), namespace.NewOptions())
+ require.NoError(t, err)
+ expectedMap, err := namespace.NewMap([]namespace.Metadata{expectedMeta})
+ require.NoError(t, err)
+
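+ // Expect a registry read at version 1 followed by a CheckAndSet that drops ns1.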
+ mValue := kv.NewMockValue(ctrl)
+ mValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).Do(func(reg *nsproto.Registry) {
+ *reg = *currentReg
+ })
+ mValue.EXPECT().Version().Return(1)
+ storeMock.EXPECT().Get(nsRegKey).Return(mValue, nil)
+ storeMock.EXPECT().CheckAndSet(nsRegKey, 1, gomock.Any()).Return(2, nil).Do(
+ func(k string, version int, actualReg *nsproto.Registry) {
+ actualMap, err := namespace.FromProto(*actualReg)
+ require.NoError(t, err)
+ require.True(t, actualMap.Equal(expectedMap))
+ },
+ )
+
+ err = as.Delete("ns1")
+ require.NoError(t, err)
+}
+
+func TestAdminService_DeleteOneNamespaceFailedSetting(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ currentMeta1, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
+ require.NoError(t, err)
+ currentMeta2, err := namespace.NewMetadata(ident.StringID("ns2"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ currentMap, err := namespace.NewMap([]namespace.Metadata{currentMeta1, currentMeta2})
+ require.NoError(t, err)
+ currentReg, err := namespace.ToProto(currentMap)
+ require.NoError(t, err)
+
+ mValue := kv.NewMockValue(ctrl)
+ mValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).Do(func(reg *nsproto.Registry) {
+ *reg = *currentReg
+ })
+ mValue.EXPECT().Version().Return(1)
+ storeMock.EXPECT().Get(nsRegKey).Return(mValue, nil)
+ storeMock.EXPECT().CheckAndSet(nsRegKey, 1, gomock.Any()).Return(-1, errors.New("some error"))
+
+ err = as.Delete("ns1")
+ require.Error(t, err)
+}
+
+func TestAdminService_DeleteLastNamespace(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ currentMeta, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ currentMap, err := namespace.NewMap([]namespace.Metadata{currentMeta})
+ require.NoError(t, err)
+ currentReg, err := namespace.ToProto(currentMap)
+ require.NoError(t, err)
+
+ mValue := kv.NewMockValue(ctrl)
+ mValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).Do(func(reg *nsproto.Registry) {
+ *reg = *currentReg
+ })
+ mValue.EXPECT().Version().Return(1)
+ storeMock.EXPECT().Get(nsRegKey).Return(mValue, nil)
+ storeMock.EXPECT().Delete(nsRegKey).Return(nil, nil)
+
+ err = as.Delete("ns1")
+ require.NoError(t, err)
+}
+
+func TestAdminService_DeleteLastNamespaceFailed(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ currentMeta, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ currentMap, err := namespace.NewMap([]namespace.Metadata{currentMeta})
+ require.NoError(t, err)
+ currentReg, err := namespace.ToProto(currentMap)
+ require.NoError(t, err)
+
+ mValue := kv.NewMockValue(ctrl)
+ mValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).Do(func(reg *nsproto.Registry) {
+ *reg = *currentReg
+ })
+ mValue.EXPECT().Version().Return(1)
+ storeMock.EXPECT().Get(nsRegKey).Return(mValue, nil)
+ storeMock.EXPECT().Delete(nsRegKey).Return(nil, errors.New("some error"))
+
+ err = as.Delete("ns1")
+ require.Error(t, err)
+}
+
+func TestAdminService_DeleteMissingNamespace(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ currentMeta, err := namespace.NewMetadata(ident.StringID("ns1"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ currentMap, err := namespace.NewMap([]namespace.Metadata{currentMeta})
+ require.NoError(t, err)
+ currentReg, err := namespace.ToProto(currentMap)
+ require.NoError(t, err)
+
+ mValue := kv.NewMockValue(ctrl)
+ mValue.EXPECT().Unmarshal(gomock.Any()).Return(nil).Do(func(reg *nsproto.Registry) {
+ *reg = *currentReg
+ })
+ mValue.EXPECT().Version().Return(1)
+ storeMock.EXPECT().Get(nsRegKey).Return(mValue, nil)
+
+ err = as.Delete("missing-namespace")
+ require.EqualError(t, err, ErrNamespaceNotFound.Error())
+}
+
+func TestAdminService_DeleteNilRegistry(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ storeMock := kv.NewMockStore(ctrl)
+ as := NewAdminService(storeMock, nsRegKey, func() string { return testNamespaceID })
+
+ storeMock.EXPECT().Get(nsRegKey).Return(nil, errors.New("some error"))
+
+ err := as.Delete("missing-namespace")
+ require.Error(t, err)
+}
diff --git a/src/dbnode/namespace/namespace_mock.go b/src/dbnode/namespace/namespace_mock.go
index 1807675287..4fba8bd53d 100644
--- a/src/dbnode/namespace/namespace_mock.go
+++ b/src/dbnode/namespace/namespace_mock.go
@@ -30,9 +30,9 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/dbnode/retention"
- "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/resource"
"github.com/gogo/protobuf/proto"
"github.com/golang/mock/gomock"
@@ -879,10 +879,10 @@ func (mr *MockSchemaRegistryMockRecorder) SetSchemaHistory(id, history interface
}
// RegisterListener mocks base method
-func (m *MockSchemaRegistry) RegisterListener(id ident.ID, listener SchemaListener) (close.SimpleCloser, error) {
+func (m *MockSchemaRegistry) RegisterListener(id ident.ID, listener SchemaListener) (resource.SimpleCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegisterListener", id, listener)
- ret0, _ := ret[0].(close.SimpleCloser)
+ ret0, _ := ret[0].(resource.SimpleCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/src/dbnode/namespace/namespace_runtime_options.go b/src/dbnode/namespace/namespace_runtime_options.go
index 8531cc6eaa..26538c7a7a 100644
--- a/src/dbnode/namespace/namespace_runtime_options.go
+++ b/src/dbnode/namespace/namespace_runtime_options.go
@@ -23,7 +23,7 @@ package namespace
import (
"sync"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/m3db/m3/src/x/watch"
)
@@ -88,7 +88,7 @@ type RuntimeOptionsManager interface {
// RegisterListener registers a listener for updates to runtime options,
// it will synchronously call back the listener when this method is called
// to deliver the current set of runtime options.
- RegisterListener(l RuntimeOptionsListener) xclose.SimpleCloser
+ RegisterListener(l RuntimeOptionsListener) xresource.SimpleCloser
// Close closes the watcher and all descendent watches.
Close()
@@ -228,7 +228,7 @@ func (w *runtimeOptionsManager) Get() RuntimeOptions {
func (w *runtimeOptionsManager) RegisterListener(
listener RuntimeOptionsListener,
-) xclose.SimpleCloser {
+) xresource.SimpleCloser {
_, watch, _ := w.watchable.Watch()
// We always initialize the watchable so always read
diff --git a/src/dbnode/namespace/schema_registry.go b/src/dbnode/namespace/schema_registry.go
index 014f19d4d2..08e8829cbb 100644
--- a/src/dbnode/namespace/schema_registry.go
+++ b/src/dbnode/namespace/schema_registry.go
@@ -24,8 +24,8 @@ import (
"fmt"
"sync"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/ident"
+ xresource "github.com/m3db/m3/src/x/resource"
xwatch "github.com/m3db/m3/src/x/watch"
"go.uber.org/zap"
@@ -145,7 +145,7 @@ func (sr *schemaRegistry) getSchemaHistory(nsIDStr string) (SchemaHistory, error
func (sr *schemaRegistry) RegisterListener(
nsID ident.ID,
listener SchemaListener,
-) (xclose.SimpleCloser, error) {
+) (xresource.SimpleCloser, error) {
if !sr.protoEnabled {
return nil, nil
}
diff --git a/src/dbnode/namespace/types.go b/src/dbnode/namespace/types.go
index 39b71a1154..8afe60ea8d 100644
--- a/src/dbnode/namespace/types.go
+++ b/src/dbnode/namespace/types.go
@@ -25,9 +25,9 @@ import (
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/dbnode/retention"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/gogo/protobuf/proto"
)
@@ -205,7 +205,7 @@ type SchemaRegistry interface {
// RegisterListener registers a schema listener for the namespace.
// If proto is not enabled, nil, nil is returned
- RegisterListener(id ident.ID, listener SchemaListener) (xclose.SimpleCloser, error)
+ RegisterListener(id ident.ID, listener SchemaListener) (xresource.SimpleCloser, error)
// Close closes all the listeners.
Close()
diff --git a/src/dbnode/network/server/httpjson/cluster/server.go b/src/dbnode/network/server/httpjson/cluster/server.go
index 2ccd905580..70d62a9e43 100644
--- a/src/dbnode/network/server/httpjson/cluster/server.go
+++ b/src/dbnode/network/server/httpjson/cluster/server.go
@@ -28,8 +28,8 @@ import (
ns "github.com/m3db/m3/src/dbnode/network/server"
"github.com/m3db/m3/src/dbnode/network/server/httpjson"
ttcluster "github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/cluster"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
+ xresource "github.com/m3db/m3/src/x/resource"
)
type server struct {
@@ -83,6 +83,6 @@ func (s *server) ListenAndServe() (ns.Close, error) {
return func() {
listener.Close()
- xclose.TryClose(service)
+ xresource.TryClose(service) // nolint: errcheck
}, nil
}
diff --git a/src/dbnode/network/server/tchannelthrift/cluster/server.go b/src/dbnode/network/server/tchannelthrift/cluster/server.go
index 0790618a05..5b05af00e8 100644
--- a/src/dbnode/network/server/tchannelthrift/cluster/server.go
+++ b/src/dbnode/network/server/tchannelthrift/cluster/server.go
@@ -25,8 +25,8 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
ns "github.com/m3db/m3/src/dbnode/network/server"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/uber/tchannel-go"
)
@@ -76,6 +76,6 @@ func (s *server) ListenAndServe() (ns.Close, error) {
return func() {
channel.Close()
- xclose.TryClose(service)
+ xresource.TryClose(service) // nolint: errcheck
}, nil
}
diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go
index 19a973c317..871dd91b20 100644
--- a/src/dbnode/network/server/tchannelthrift/node/service.go
+++ b/src/dbnode/network/server/tchannelthrift/node/service.go
@@ -29,7 +29,6 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift"
"github.com/m3db/m3/src/dbnode/network/server/tchannelthrift/convert"
@@ -42,6 +41,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xdebug "github.com/m3db/m3/src/x/debug"
xerrors "github.com/m3db/m3/src/x/errors"
@@ -49,7 +49,7 @@ import (
"github.com/m3db/m3/src/x/instrument"
xopentracing "github.com/m3db/m3/src/x/opentracing"
"github.com/m3db/m3/src/x/pool"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/m3db/m3/src/x/serialize"
xtime "github.com/m3db/m3/src/x/time"
@@ -2302,7 +2302,7 @@ func (s *service) readEncodedResult(
segments := s.pools.segmentsArray.Get()
segments = segmentsArr(segments).grow(len(encoded))
segments = segments[:0]
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
s.pools.segmentsArray.Put(segments)
}))
diff --git a/src/dbnode/network/server/tchannelthrift/options.go b/src/dbnode/network/server/tchannelthrift/options.go
index 4b59a61de0..6e8581e89d 100644
--- a/src/dbnode/network/server/tchannelthrift/options.go
+++ b/src/dbnode/network/server/tchannelthrift/options.go
@@ -21,9 +21,9 @@
package tchannelthrift
import (
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
diff --git a/src/dbnode/network/server/tchannelthrift/types.go b/src/dbnode/network/server/tchannelthrift/types.go
index 092561257b..9bd0ce03aa 100644
--- a/src/dbnode/network/server/tchannelthrift/types.go
+++ b/src/dbnode/network/server/tchannelthrift/types.go
@@ -21,9 +21,9 @@
package tchannelthrift
import (
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/serialize"
diff --git a/src/dbnode/persist/fs/commitlog/commit_log.go b/src/dbnode/persist/fs/commitlog/commit_log.go
index 40ab3b8298..0281628aa7 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log.go
@@ -27,11 +27,11 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/persist/fs/commitlog/commit_log_mock.go b/src/dbnode/persist/fs/commitlog/commit_log_mock.go
index fe0fa8c15c..f811ffa927 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log_mock.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/m3db/m3/src/dbnode/persist/fs/commitlog/types.go
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -28,11 +28,11 @@ import (
"reflect"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/persist/fs/commitlog/commit_log_test.go b/src/dbnode/persist/fs/commitlog/commit_log_test.go
index ea4894aeaa..00f0601a8a 100644
--- a/src/dbnode/persist/fs/commitlog/commit_log_test.go
+++ b/src/dbnode/persist/fs/commitlog/commit_log_test.go
@@ -34,12 +34,12 @@ import (
"time"
"github.com/m3db/bitset"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/persist/fs/commitlog/options.go b/src/dbnode/persist/fs/commitlog/options.go
index 7e43231a4d..fee984e9a6 100644
--- a/src/dbnode/persist/fs/commitlog/options.go
+++ b/src/dbnode/persist/fs/commitlog/options.go
@@ -26,8 +26,8 @@ import (
"runtime"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist/fs"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
diff --git a/src/dbnode/persist/fs/commitlog/types.go b/src/dbnode/persist/fs/commitlog/types.go
index 05ff965db9..ee54adea95 100644
--- a/src/dbnode/persist/fs/commitlog/types.go
+++ b/src/dbnode/persist/fs/commitlog/types.go
@@ -23,11 +23,11 @@ package commitlog
import (
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/persist/fs/commitlog/writer.go b/src/dbnode/persist/fs/commitlog/writer.go
index 864184e533..2040cd38d6 100644
--- a/src/dbnode/persist/fs/commitlog/writer.go
+++ b/src/dbnode/persist/fs/commitlog/writer.go
@@ -28,13 +28,13 @@ import (
"os"
"github.com/m3db/bitset"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/digest"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/persist/schema"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/clock"
xos "github.com/m3db/m3/src/x/os"
xtime "github.com/m3db/m3/src/x/time"
)
diff --git a/src/dbnode/persist/fs/files.go b/src/dbnode/persist/fs/files.go
index 1de9a425d6..a0017e8075 100644
--- a/src/dbnode/persist/fs/files.go
+++ b/src/dbnode/persist/fs/files.go
@@ -38,7 +38,6 @@ import (
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/persist/schema"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
- xclose "github.com/m3db/m3/src/x/close"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -360,17 +359,6 @@ func openFiles(opener fileOpener, fds map[string]**os.File) error {
return firstErr
}
-// TODO(xichen): move closeAll to m3x/close.
-func closeAll(closers ...xclose.Closer) error {
- multiErr := xerrors.NewMultiError()
- for _, closer := range closers {
- if err := closer.Close(); err != nil {
- multiErr = multiErr.Add(err)
- }
- }
- return multiErr.FinalError()
-}
-
// DeleteFiles delete a set of files, returning all the errors encountered during
// the deletion process.
func DeleteFiles(filePaths []string) error {
diff --git a/src/dbnode/persist/fs/files_test.go b/src/dbnode/persist/fs/files_test.go
index 60a4b68dd0..556bf9efb2 100644
--- a/src/dbnode/persist/fs/files_test.go
+++ b/src/dbnode/persist/fs/files_test.go
@@ -39,6 +39,7 @@ import (
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/pborman/uuid"
"github.com/stretchr/testify/assert"
@@ -88,7 +89,7 @@ func TestCloseAllFails(t *testing.T) {
defer os.Remove(file.Name())
assert.NoError(t, file.Close())
- assert.Error(t, closeAll(file))
+ assert.Error(t, xresource.CloseAll(file))
}
func TestDeleteFiles(t *testing.T) {
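Aside: the closeAll helper removed above now lives in x/resource as CloseAll. A minimal sketch of the pattern, assuming CloseAll keeps the removed helper's semantics (attempt every Close, accumulating failures into one multi-error):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	xresource "github.com/m3db/m3/src/x/resource"
)

func main() {
	f1, _ := ioutil.TempFile("", "a")
	f2, _ := ioutil.TempFile("", "b")
	defer os.Remove(f1.Name())
	defer os.Remove(f2.Name())

	// Every closer is attempted even if an earlier Close fails;
	// failures are accumulated into a single returned error.
	if err := xresource.CloseAll(f1, f2); err != nil {
		fmt.Println("close error:", err)
	}
}
```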
diff --git a/src/dbnode/persist/fs/index_claims_manager.go b/src/dbnode/persist/fs/index_claims_manager.go
new file mode 100644
index 0000000000..e7d42f5cc8
--- /dev/null
+++ b/src/dbnode/persist/fs/index_claims_manager.go
@@ -0,0 +1,128 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fs
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+var errOutOfRetentionClaim = errors.New("out of retention index volume claim")
+
+type indexClaimsManager struct {
+ sync.Mutex
+
+ filePathPrefix string
+ nowFn clock.NowFn
+ nextIndexFileSetVolumeIndexFn nextIndexFileSetVolumeIndexFn
+
+ // Map of ns ID string -> blockStart -> volumeIndexClaim.
+ volumeIndexClaims map[string]map[xtime.UnixNano]volumeIndexClaim
+}
+
+type volumeIndexClaim struct {
+ volumeIndex int
+}
+
+// NewIndexClaimsManager returns an instance of the index claims manager. This manages
+// concurrent claims for volume indices per ns and block start.
+// NB(bodu): There should be only a single shared index claims manager among all threads
+// writing index data filesets.
+func NewIndexClaimsManager(opts Options) IndexClaimsManager {
+ return &indexClaimsManager{
+ filePathPrefix: opts.FilePathPrefix(),
+ nowFn: opts.ClockOptions().NowFn(),
+ volumeIndexClaims: make(map[string]map[xtime.UnixNano]volumeIndexClaim),
+ nextIndexFileSetVolumeIndexFn: NextIndexFileSetVolumeIndex,
+ }
+}
+
+func (i *indexClaimsManager) ClaimNextIndexFileSetVolumeIndex(
+ md namespace.Metadata,
+ blockStart time.Time,
+) (int, error) {
+ i.Lock()
+ earliestBlockStart := retention.FlushTimeStartForRetentionPeriod(
+ md.Options().RetentionOptions().RetentionPeriod(),
+ md.Options().IndexOptions().BlockSize(),
+ i.nowFn(),
+ )
+ defer func() {
+ i.deleteOutOfRetentionEntriesWithLock(md.ID(), earliestBlockStart)
+ i.Unlock()
+ }()
+
+ // Reject out of retention claims.
+ if blockStart.Before(earliestBlockStart) {
+ return 0, errOutOfRetentionClaim
+ }
+
+ volumeIndexClaimsByBlockStart, ok := i.volumeIndexClaims[md.ID().String()]
+ if !ok {
+ volumeIndexClaimsByBlockStart = make(map[xtime.UnixNano]volumeIndexClaim)
+ i.volumeIndexClaims[md.ID().String()] = volumeIndexClaimsByBlockStart
+ }
+
+ blockStartUnixNanos := xtime.ToUnixNano(blockStart)
+ if curr, ok := volumeIndexClaimsByBlockStart[blockStartUnixNanos]; ok {
+ // Already had a previous claim, return the next claim.
+ next := curr
+ next.volumeIndex++
+ volumeIndexClaimsByBlockStart[blockStartUnixNanos] = next
+ return next.volumeIndex, nil
+ }
+
+ volumeIndex, err := i.nextIndexFileSetVolumeIndexFn(i.filePathPrefix, md.ID(),
+ blockStart)
+ if err != nil {
+ return 0, err
+ }
+ volumeIndexClaimsByBlockStart[blockStartUnixNanos] = volumeIndexClaim{
+ volumeIndex: volumeIndex,
+ }
+ return volumeIndex, nil
+}
+
+func (i *indexClaimsManager) deleteOutOfRetentionEntriesWithLock(
+ nsID ident.ID,
+ earliestBlockStart time.Time,
+) {
+ earliestBlockStartUnixNanos := xtime.ToUnixNano(earliestBlockStart)
+ // ns ID already exists at this point since the delete call is deferred.
+ for blockStart := range i.volumeIndexClaims[nsID.String()] {
+ if blockStart.Before(earliestBlockStartUnixNanos) {
+ delete(i.volumeIndexClaims[nsID.String()], blockStart)
+ }
+ }
+}
+
+type nextIndexFileSetVolumeIndexFn func(
+ filePathPrefix string,
+ namespace ident.ID,
+ blockStart time.Time,
+) (int, error)
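For orientation, a usage sketch of the new manager, mirroring the test below and assuming a writable default fs.NewOptions(): one shared instance hands out increasing volume indices per namespace and block start, so concurrent index flushes never collide.

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist/fs"
	"github.com/m3db/m3/src/x/ident"
)

func main() {
	icm := fs.NewIndexClaimsManager(fs.NewOptions())

	md, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
	if err != nil {
		panic(err)
	}

	blockSize := md.Options().IndexOptions().BlockSize()
	blockStart := time.Now().Truncate(blockSize)

	// The first claim consults the filesystem for the next volume index;
	// later claims for the same block start just increment in memory.
	for i := 0; i < 3; i++ {
		volumeIndex, err := icm.ClaimNextIndexFileSetVolumeIndex(md, blockStart)
		if err != nil {
			panic(err)
		}
		fmt.Println("claimed volume index:", volumeIndex)
	}
}
```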
diff --git a/src/dbnode/persist/fs/index_claims_manager_test.go b/src/dbnode/persist/fs/index_claims_manager_test.go
new file mode 100644
index 0000000000..de7755e271
--- /dev/null
+++ b/src/dbnode/persist/fs/index_claims_manager_test.go
@@ -0,0 +1,115 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package fs
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+func TestIndexClaimsManagerConcurrentClaims(t *testing.T) {
+ mgr, ok := NewIndexClaimsManager(NewOptions()).(*indexClaimsManager)
+ require.True(t, ok)
+
+ // Always return 0 for starting volume index for testing purposes.
+ mgr.nextIndexFileSetVolumeIndexFn = func(
+ filePathPrefix string,
+ namespace ident.ID,
+ blockStart time.Time,
+ ) (int, error) {
+ return 0, nil
+ }
+
+ md, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
+ require.NoError(t, err)
+
+ var (
+ m sync.Map
+ wg sync.WaitGroup
+ blockSize = md.Options().IndexOptions().BlockSize()
+ blockStart = time.Now().Truncate(blockSize)
+ )
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for j := 0; j < 100; j++ {
+ volumeIndex, err := mgr.ClaimNextIndexFileSetVolumeIndex(
+ md,
+ blockStart,
+ )
+ require.NoError(t, err)
+ _, loaded := m.LoadOrStore(volumeIndex, true)
+ // volume index should not have been previously stored or
+ // there are conflicting volume indices.
+ require.False(t, loaded)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+// TestIndexClaimsManagerOutOfRetention ensures that we both reject and delete out of
+// retention index claims.
+func TestIndexClaimsManagerOutOfRetention(t *testing.T) {
+ mgr, ok := NewIndexClaimsManager(NewOptions()).(*indexClaimsManager)
+ require.True(t, ok)
+
+ // Always return 0 for starting volume index for testing purposes.
+ mgr.nextIndexFileSetVolumeIndexFn = func(
+ filePathPrefix string,
+ namespace ident.ID,
+ blockStart time.Time,
+ ) (int, error) {
+ return 0, nil
+ }
+
+ md, err := namespace.NewMetadata(ident.StringID("foo"), namespace.NewOptions())
+ blockSize := md.Options().IndexOptions().BlockSize()
+ blockStart := time.Now().Truncate(blockSize)
+ require.NoError(t, err)
+
+ _, err = mgr.ClaimNextIndexFileSetVolumeIndex(
+ md,
+ blockStart,
+ )
+ require.NoError(t, err)
+
+ now := mgr.nowFn().Add(md.Options().RetentionOptions().RetentionPeriod()).
+ Add(blockSize)
+ mgr.nowFn = func() time.Time { return now }
+ _, err = mgr.ClaimNextIndexFileSetVolumeIndex(
+ md,
+ blockStart,
+ )
+ require.Equal(t, errOutOfRetentionClaim, err)
+
+ // Verify that the out of retention entry has been deleted as well.
+ _, ok = mgr.volumeIndexClaims[md.ID().String()][xtime.ToUnixNano(blockStart)]
+ require.False(t, ok)
+}
diff --git a/src/dbnode/persist/fs/index_read_segments.go b/src/dbnode/persist/fs/index_read_segments.go
index 73a398b792..d3c1fcb668 100644
--- a/src/dbnode/persist/fs/index_read_segments.go
+++ b/src/dbnode/persist/fs/index_read_segments.go
@@ -22,6 +22,7 @@ package fs
import (
"errors"
+ "fmt"
"io"
"github.com/m3db/m3/src/m3ninx/index/segment"
@@ -84,12 +85,6 @@ func ReadIndexSegments(
success = false
)
- if validate {
- if err = reader.Validate(); err != nil {
- return ReadIndexSegmentsResult{}, err
- }
- }
-
// Need to do this to guarantee we release all resources in case of failure.
defer func() {
if !success {
@@ -123,6 +118,13 @@ func ReadIndexSegments(
segments = append(segments, seg)
}
+ // Note: we need to validate only after all segment file sets have been read.
+ if validate {
+ if err = reader.Validate(); err != nil {
+ return ReadIndexSegmentsResult{}, fmt.Errorf("failed to validate index segments: %w", err)
+ }
+ }
+
// Indicate we don't need the defer() above to release any resources, as we are
// transferring ownership to the caller.
success = true
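Since the validation error is now wrapped with %w, callers can still recover the underlying cause. A standalone sketch; errChecksumMismatch is a hypothetical sentinel for illustration, not one from this codebase:

```go
package main

import (
	"errors"
	"fmt"
)

// errChecksumMismatch stands in for whatever cause Validate returns.
var errChecksumMismatch = errors.New("checksum mismatch")

func main() {
	// Wrap the cause with %w, as ReadIndexSegments now does.
	err := fmt.Errorf("failed to validate index segments: %w", errChecksumMismatch)

	fmt.Println(errors.Is(err, errChecksumMismatch)) // true
	fmt.Println(errors.Unwrap(err))                  // checksum mismatch
}
```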
diff --git a/src/dbnode/persist/fs/migration/migration_test.go b/src/dbnode/persist/fs/migration/migration_test.go
index 8d81f14428..d8a654314a 100644
--- a/src/dbnode/persist/fs/migration/migration_test.go
+++ b/src/dbnode/persist/fs/migration/migration_test.go
@@ -50,17 +50,17 @@ func TestToVersion1_1Run(t *testing.T) {
defer os.RemoveAll(dir)
var shard uint32 = 1
- nsId := ident.StringID("foo")
+ nsID := ident.StringID("foo")
// Write unmigrated fileset to disk
- fsOpts := writeUnmigratedData(t, filePathPrefix, nsId, shard)
+ fsOpts := writeUnmigratedData(t, filePathPrefix, nsID, shard)
// Read info file of just written fileset
- results := fs.ReadInfoFiles(filePathPrefix, nsId, shard,
+ results := fs.ReadInfoFiles(filePathPrefix, nsID, shard,
fsOpts.InfoReaderBufferSize(), fsOpts.DecodingOptions(), persist.FileSetFlushType)
require.Equal(t, 1, len(results))
infoFileResult := results[0]
- indexFd := openFile(t, fsOpts, nsId, shard, infoFileResult, "index")
+ indexFd := openFile(t, fsOpts, nsID, shard, infoFileResult, "index")
oldBytes, err := ioutil.ReadAll(indexFd)
require.NoError(t, err)
@@ -68,8 +68,9 @@ func TestToVersion1_1Run(t *testing.T) {
pm, err := fs.NewPersistManager(
fsOpts.SetEncodingOptions(msgpack.DefaultLegacyEncodingOptions)) // Set encoder to most up-to-date version
require.NoError(t, err)
+ icm := fs.NewIndexClaimsManager(fsOpts)
- md, err := namespace.NewMetadata(nsId, namespace.NewOptions())
+ md, err := namespace.NewMetadata(nsID, namespace.NewOptions())
require.NoError(t, err)
plCache, closer, err := index.NewPostingsListCache(1, index.PostingsListCacheOptions{
@@ -83,6 +84,7 @@ func TestToVersion1_1Run(t *testing.T) {
SetNamespaceMetadata(md).
SetStorageOptions(storage.NewOptions().
SetPersistManager(pm).
+ SetIndexClaimsManager(icm).
SetNamespaceInitializer(namespace.NewStaticInitializer([]namespace.Metadata{md})).
SetRepairEnabled(false).
SetIndexOptions(index.NewOptions().
@@ -99,7 +101,7 @@ func TestToVersion1_1Run(t *testing.T) {
require.NoError(t, err)
// Read new info file and make sure it matches results returned by task
- newInfoFd := openFile(t, fsOpts, nsId, shard, updatedInfoFile, "info")
+ newInfoFd := openFile(t, fsOpts, nsID, shard, updatedInfoFile, "info")
newInfoBytes, err := ioutil.ReadAll(newInfoFd)
require.NoError(t, err)
@@ -111,7 +113,7 @@ func TestToVersion1_1Run(t *testing.T) {
require.Equal(t, updatedInfoFile.Info, info)
// Read the index entries of new volume set
- indexFd = openFile(t, fsOpts, nsId, shard, updatedInfoFile, "index")
+ indexFd = openFile(t, fsOpts, nsID, shard, updatedInfoFile, "index")
newBytes, err := ioutil.ReadAll(indexFd)
require.NoError(t, err)
@@ -129,18 +131,23 @@ func TestToVersion1_1Run(t *testing.T) {
func openFile(
t *testing.T,
fsOpts fs.Options,
- nsId ident.ID,
+ nsID ident.ID,
shard uint32,
infoFileResult fs.ReadInfoFileResult,
fileType string,
) *os.File {
indexFd, err := os.Open(path.Join(fsOpts.FilePathPrefix(), fmt.Sprintf("data/%s/%d/fileset-%d-%d-%s.db",
- nsId.String(), shard, infoFileResult.Info.BlockStart, infoFileResult.Info.VolumeIndex, fileType)))
+ nsID.String(), shard, infoFileResult.Info.BlockStart, infoFileResult.Info.VolumeIndex, fileType)))
require.NoError(t, err)
return indexFd
}
-func writeUnmigratedData(t *testing.T, filePathPrefix string, nsId ident.ID, shard uint32) fs.Options {
+func writeUnmigratedData(
+ t *testing.T,
+ filePathPrefix string,
+ nsID ident.ID,
+ shard uint32,
+) fs.Options {
// Use encoding options that will not generate entry level checksums
eOpts := msgpack.LegacyEncodingOptions{EncodeLegacyIndexEntryVersion: msgpack.LegacyEncodingIndexEntryVersionV2}
@@ -154,7 +161,7 @@ func writeUnmigratedData(t *testing.T, filePathPrefix string, nsId ident.ID, sha
blockStart := time.Now().Truncate(time.Hour)
writerOpts := fs.DataWriterOpenOptions{
Identifier: fs.FileSetFileIdentifier{
- Namespace: nsId,
+ Namespace: nsID,
Shard: shard,
BlockStart: blockStart,
VolumeIndex: 0,
diff --git a/src/dbnode/persist/fs/options.go b/src/dbnode/persist/fs/options.go
index 6aa6d246ef..3ececbe1a2 100644
--- a/src/dbnode/persist/fs/options.go
+++ b/src/dbnode/persist/fs/options.go
@@ -25,10 +25,10 @@ import (
"fmt"
"os"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist/fs/msgpack"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
"github.com/m3db/m3/src/x/pool"
diff --git a/src/dbnode/persist/fs/persist_manager.go b/src/dbnode/persist/fs/persist_manager.go
index 69a45d2bb2..1051301a69 100644
--- a/src/dbnode/persist/fs/persist_manager.go
+++ b/src/dbnode/persist/fs/persist_manager.go
@@ -26,7 +26,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/runtime"
@@ -35,8 +34,9 @@ import (
m3ninxfs "github.com/m3db/m3/src/m3ninx/index/segment/fst"
m3ninxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/pborman/uuid"
"github.com/uber-go/tally"
@@ -93,7 +93,7 @@ type persistManager struct {
metrics persistManagerMetrics
- runtimeOptsListener xclose.SimpleCloser
+ runtimeOptsListener xresource.SimpleCloser
}
type dataPersistManager struct {
@@ -255,22 +255,12 @@ func (pm *persistManager) PrepareIndex(opts persist.IndexPrepareOptions) (persis
return prepared, errPersistManagerCannotPrepareIndexNotPersisting
}
- // NB(prateek): unlike data flush files, we allow multiple index flush files for a single block start.
- // As a result of this, every time we persist index flush data, we have to compute the volume index
- // to uniquely identify a single FileSetFile on disk.
-
- // work out the volume index for the next Index FileSetFile for the given namespace/blockstart
- volumeIndex, err := NextIndexFileSetVolumeIndex(pm.opts.FilePathPrefix(), nsMetadata.ID(), blockStart)
- if err != nil {
- return prepared, err
- }
-
// we now have all the identifier needed to uniquely specificy a single Index FileSetFile on disk.
fileSetID := FileSetFileIdentifier{
FileSetContentType: persist.FileSetIndexContentType,
Namespace: nsID,
BlockStart: blockStart,
- VolumeIndex: volumeIndex,
+ VolumeIndex: opts.VolumeIndex,
}
blockSize := nsMetadata.Options().IndexOptions().BlockSize()
idxWriterOpts := IndexWriterOpenOptions{
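With the volume-index computation removed from PrepareIndex, callers claim an index up front and pass it through. A sketch of the new sequence, assuming the persist.IndexFlush and persist.PreparedIndexPersist names from the surrounding package:

```go
package sketch

import (
	"time"

	"github.com/m3db/m3/src/dbnode/namespace"
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/persist/fs"
)

func prepareIndexWithClaim(
	icm fs.IndexClaimsManager,
	flush persist.IndexFlush,
	md namespace.Metadata,
	blockStart time.Time,
) (persist.PreparedIndexPersist, error) {
	// Claim the volume index up front via the shared manager...
	volumeIndex, err := icm.ClaimNextIndexFileSetVolumeIndex(md, blockStart)
	if err != nil {
		return persist.PreparedIndexPersist{}, err
	}

	// ...and thread it through, instead of PrepareIndex computing it.
	return flush.PrepareIndex(persist.IndexPrepareOptions{
		NamespaceMetadata: md,
		BlockStart:        blockStart,
		VolumeIndex:       volumeIndex,
	})
}
```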
diff --git a/src/dbnode/persist/fs/persist_manager_test.go b/src/dbnode/persist/fs/persist_manager_test.go
index b582680671..cd0f39386d 100644
--- a/src/dbnode/persist/fs/persist_manager_test.go
+++ b/src/dbnode/persist/fs/persist_manager_test.go
@@ -346,10 +346,17 @@ func TestPersistenceManagerPrepareIndexFileExists(t *testing.T) {
segWriter.EXPECT().Reset(nil)
assert.NoError(t, flush.DoneIndex())
}()
+ volumeIndex, err := NextIndexFileSetVolumeIndex(
+ pm.filePathPrefix,
+ testNs1ID,
+ blockStart,
+ )
+ require.NoError(t, err)
prepareOpts := persist.IndexPrepareOptions{
NamespaceMetadata: testNs1Metadata(t),
BlockStart: blockStart,
+ VolumeIndex: volumeIndex,
}
writer.EXPECT().Open(xtest.CmpMatcher(
IndexWriterOpenOptions{
diff --git a/src/dbnode/persist/fs/types.go b/src/dbnode/persist/fs/types.go
index 7aa7d9fee6..55a3e70874 100644
--- a/src/dbnode/persist/fs/types.go
+++ b/src/dbnode/persist/fs/types.go
@@ -25,7 +25,6 @@ import (
"os"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -42,6 +41,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -719,3 +719,12 @@ type CrossBlockIterator interface {
// Reset resets the iterator to the given block records.
Reset(records []BlockRecord)
}
+
+// IndexClaimsManager manages concurrent claims to volume indices per ns and block start.
+// This allows multiple threads to safely increment the volume index.
+type IndexClaimsManager interface {
+ ClaimNextIndexFileSetVolumeIndex(
+ md namespace.Metadata,
+ blockStart time.Time,
+ ) (int, error)
+}
diff --git a/src/dbnode/persist/fs/write.go b/src/dbnode/persist/fs/write.go
index 5b2c17e6a6..939609547c 100644
--- a/src/dbnode/persist/fs/write.go
+++ b/src/dbnode/persist/fs/write.go
@@ -36,6 +36,7 @@ import (
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
"github.com/m3db/m3/src/x/ident"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/m3db/m3/src/x/serialize"
xtime "github.com/m3db/m3/src/x/time"
@@ -390,7 +391,7 @@ func (w *writer) closeWOIndex() error {
return err
}
- return closeAll(
+ return xresource.CloseAll(
w.infoFdWithDigest,
w.indexFdWithDigest,
w.summariesFdWithDigest,
diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go
index a24d4040b1..f93f28bb56 100644
--- a/src/dbnode/persist/types.go
+++ b/src/dbnode/persist/types.go
@@ -271,6 +271,7 @@ type IndexPrepareOptions struct {
FileSetType FileSetType
Shards map[uint32]struct{}
IndexVolumeType idxpersist.IndexVolumeType
+ VolumeIndex int
}
// DataPrepareSnapshotOptions is the options struct for the Prepare method that contains
diff --git a/src/dbnode/runtime/runtime_mock.go b/src/dbnode/runtime/runtime_mock.go
index aa45644751..187dc92e50 100644
--- a/src/dbnode/runtime/runtime_mock.go
+++ b/src/dbnode/runtime/runtime_mock.go
@@ -30,7 +30,7 @@ import (
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/topology"
- "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/resource"
"github.com/golang/mock/gomock"
)
@@ -488,10 +488,10 @@ func (mr *MockOptionsManagerMockRecorder) Get() *gomock.Call {
}
// RegisterListener mocks base method
-func (m *MockOptionsManager) RegisterListener(l OptionsListener) close.SimpleCloser {
+func (m *MockOptionsManager) RegisterListener(l OptionsListener) resource.SimpleCloser {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegisterListener", l)
- ret0, _ := ret[0].(close.SimpleCloser)
+ ret0, _ := ret[0].(resource.SimpleCloser)
return ret0
}
diff --git a/src/dbnode/runtime/runtime_options.go b/src/dbnode/runtime/runtime_options.go
index 8baf6f15d0..aece7e16b5 100644
--- a/src/dbnode/runtime/runtime_options.go
+++ b/src/dbnode/runtime/runtime_options.go
@@ -33,7 +33,7 @@ const (
DefaultWriteConsistencyLevel = topology.ConsistencyLevelMajority
// DefaultReadConsistencyLevel is the default read consistency level
- DefaultReadConsistencyLevel = topology.ReadConsistencyLevelMajority
+ DefaultReadConsistencyLevel = topology.ReadConsistencyLevelUnstrictMajority
// DefaultBootstrapConsistencyLevel is the default bootstrap consistency level
DefaultBootstrapConsistencyLevel = topology.ReadConsistencyLevelMajority
diff --git a/src/dbnode/runtime/runtime_options_manager.go b/src/dbnode/runtime/runtime_options_manager.go
index 290b6ca7b0..bcb8630b13 100644
--- a/src/dbnode/runtime/runtime_options_manager.go
+++ b/src/dbnode/runtime/runtime_options_manager.go
@@ -23,7 +23,7 @@ package runtime
import (
"fmt"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
xwatch "github.com/m3db/m3/src/x/watch"
)
@@ -52,7 +52,7 @@ func (w *optionsManager) Get() Options {
func (w *optionsManager) RegisterListener(
listener OptionsListener,
-) xclose.SimpleCloser {
+) xresource.SimpleCloser {
_, watch, _ := w.watchable.Watch()
// We always initialize the watchable so always read
@@ -98,7 +98,7 @@ func (n noOpOptionsManager) Get() Options {
func (n noOpOptionsManager) RegisterListener(
listener OptionsListener,
-) xclose.SimpleCloser {
+) xresource.SimpleCloser {
// noOpOptionsManager never changes its options, not worth
// registering listener
return noOpCloser{}
diff --git a/src/dbnode/runtime/types.go b/src/dbnode/runtime/types.go
index cafe9f8046..4b8ae81f9a 100644
--- a/src/dbnode/runtime/types.go
+++ b/src/dbnode/runtime/types.go
@@ -25,7 +25,7 @@ import (
"github.com/m3db/m3/src/dbnode/ratelimit"
"github.com/m3db/m3/src/dbnode/topology"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// Options is a set of runtime options.
@@ -195,7 +195,7 @@ type OptionsManager interface {
// RegisterListener registers a listener for updates to runtime options,
// it will synchronously call back the listener when this method is called
// to deliver the current set of runtime options.
- RegisterListener(l OptionsListener) xclose.SimpleCloser
+ RegisterListener(l OptionsListener) xresource.SimpleCloser
// Close closes the watcher and all descendent watches.
Close()
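A small sketch of the renamed return type in use, assuming xresource.SimpleCloser exposes just Close() with no error, like the x/close version it replaces:

```go
package sketch

import "github.com/m3db/m3/src/dbnode/runtime"

// watchOptions subscribes a listener to runtime option updates and
// returns a plain teardown func; RegisterListener delivers the current
// options synchronously before returning.
func watchOptions(mgr runtime.OptionsManager, l runtime.OptionsListener) func() {
	closer := mgr.RegisterListener(l)
	return closer.Close
}
```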
diff --git a/src/x/lockfile/lockfile.go b/src/dbnode/server/lockfile.go
similarity index 78%
rename from src/x/lockfile/lockfile.go
rename to src/dbnode/server/lockfile.go
index bba9d374c5..d678e59d32 100644
--- a/src/x/lockfile/lockfile.go
+++ b/src/dbnode/server/lockfile.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package lockfile
+package server
import (
"os"
@@ -28,15 +28,15 @@ import (
"golang.org/x/sys/unix"
)
-// Lockfile represents an acquired lockfile.
-type Lockfile struct {
+// lockfile represents an acquired lockfile.
+type lockfile struct {
file os.File
}
-// Acquire creates the given file path if it doesn't exist and
+// acquireLockfile creates the given file path if it doesn't exist and
// obtains an exclusive lock on it. An error is returned if the lock
// has been obtained by another process.
-func Acquire(path string) (*Lockfile, error) {
+func acquireLockfile(path string) (*lockfile, error) {
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666)
if err != nil {
return nil, errors.Wrap(err, "failed opening lock path")
@@ -51,23 +51,23 @@ func Acquire(path string) (*Lockfile, error) {
return nil, errors.Wrap(err, "failed obtaining lock")
}
- lf := Lockfile{*file}
+ lf := lockfile{*file}
return &lf, nil
}
-// CreateAndAcquire creates any non-existing directories needed to
-// create the lock file, then acquires a lock on it
-func CreateAndAcquire(path string, newDirMode os.FileMode) (*Lockfile, error) {
+// createAndAcquireLockfile creates any non-existing directories needed to
+// create the lock file, then acquires a lock on it.
+func createAndAcquireLockfile(path string, newDirMode os.FileMode) (*lockfile, error) {
if err := os.MkdirAll(paths.Dir(path), newDirMode); err != nil {
return nil, err
}
- return Acquire(path)
+ return acquireLockfile(path)
}
-// Release releases the lock on the file and removes the file.
-func (lf Lockfile) Release() error {
+// releaseLockfile releases the lock on the file and removes the file.
+func (lf lockfile) releaseLockfile() error {
ft := &unix.Flock_t{
Pid: int32(os.Getpid()),
Type: unix.F_UNLCK,
diff --git a/src/x/lockfile/lockfile_test.go b/src/dbnode/server/lockfile_test.go
similarity index 54%
rename from src/x/lockfile/lockfile_test.go
rename to src/dbnode/server/lockfile_test.go
index 843f5e7135..a0c663b14c 100644
--- a/src/x/lockfile/lockfile_test.go
+++ b/src/dbnode/server/lockfile_test.go
@@ -18,9 +18,10 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package lockfile
+package server
import (
+ "fmt"
"io/ioutil"
"math/rand"
"os"
@@ -29,6 +30,7 @@ import (
"path/filepath"
"strconv"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
)
@@ -36,32 +38,30 @@ import (
func TestAcquire(t *testing.T) {
t.Run("process B can obtain the lock after A exits", func(t *testing.T) {
path := tempPath()
- assert.NoError(t, newLockfileCommand(path, 0, true).Run())
+ assert.NoError(t, newLockfileCommand(path, "", true).Run())
_, err := os.Stat(path)
assert.True(t, os.IsNotExist(err)) // check temp file was removed
- assert.NoError(t, newLockfileCommand(path, 0, true).Run())
+ assert.NoError(t, newLockfileCommand(path, "", true).Run())
})
t.Run("process B can obtain the lock after A exits, even if A didn't remove the lock file", func(t *testing.T) {
path := tempPath()
- assert.NoError(t, newLockfileCommand(path, 0, false).Run())
+ assert.NoError(t, newLockfileCommand(path, "", false).Run())
_, err := os.Stat(path)
assert.False(t, os.IsNotExist(err)) // check temp file was *not* removed
- assert.NoError(t, newLockfileCommand(path, 0, true).Run())
+ assert.NoError(t, newLockfileCommand(path, "", true).Run())
})
t.Run("if process A holds the lock, B must not be able to obtain it", func(t *testing.T) {
path := tempPath()
- procA := newLockfileCommand(path, 1, true)
- procB := newLockfileCommand(path, 1, true)
+ procA := newLockfileCommand(path, "1s", false)
+ procB := newLockfileCommand(path, "1s", false)
- // to avoid sleeping until A obtains the lock (it takes some
- // time for the process to boot and obtain the lock), we start
- // both processes, then check exactly one of them failed
assert.NoError(t, procA.Start())
assert.NoError(t, procB.Start())
+ // one process will acquire and hold the lock via acquireLockfile; the other's acquireLockfile call will fail.
errA, errB := procA.Wait(), procB.Wait()
if errA != nil {
@@ -79,25 +79,74 @@ func TestCreateAndAcquire(t *testing.T) {
tempSubDir := path.Join(tempDir, "testDir")
- lock, err := CreateAndAcquire(path.Join(tempSubDir, "testLockfile"), os.ModePerm)
+ lock, err := createAndAcquireLockfile(path.Join(tempSubDir, "testLockfile"), os.ModePerm)
assert.NoError(t, err)
- err = lock.Release()
+ err = lock.releaseLockfile()
assert.NoError(t, err)
- // check CreateAndAcquire() created the missing directory
+ // check createAndAcquireLockfile() created the missing directory
_, err = os.Stat(tempSubDir)
assert.False(t, os.IsNotExist(err))
}
+// TestAcquireAndReleaseFile is invoked as a separate process by other tests in lockfile_test.go
+// to exercise the file locking capabilities. The test is a no-op if run as part
+// of the broader test suite. Given it's run as a separate process, we explicitly use error
+// exit codes as opposed to failing assertions on errors.
+func TestAcquireAndReleaseFile(t *testing.T) {
+ // immediately return if this test wasn't invoked by another test in the file.
+ // nolint: goconst
+ if os.Getenv("LOCKFILE_SUPERVISED_PROCESS") != "true" {
+ t.Skip()
+ }
+
+ var (
+ lockPath = os.Getenv("WITH_LOCK_PATH")
+ removeLock = os.Getenv("WITH_REMOVE_LOCK")
+ sleepDuration = os.Getenv("WITH_SLEEP_DURATION")
+ )
+
+ lock, err := acquireLockfile(lockPath)
+ if err != nil {
+ os.Exit(1)
+ }
+
+ if sleepDuration != "" {
+ duration, err := time.ParseDuration(sleepDuration)
+ if err != nil {
+ os.Exit(1)
+ }
+
+ time.Sleep(duration)
+ }
+
+ if removeLock == "true" {
+ err := lock.releaseLockfile()
+ if err != nil {
+ os.Exit(1)
+ }
+ }
+}
+
func tempPath() string {
return filepath.Join(os.TempDir(), "lockfile_test_"+strconv.Itoa(os.Getpid())+"_"+strconv.Itoa(rand.Intn(100000)))
}
-func newLockfileCommand(lockPath string, sleep int, removeLock bool) *exec.Cmd {
- removeLockStr := "0"
+func newLockfileCommand(lockPath string, sleepDuration string, removeLock bool) *exec.Cmd {
+ removeLockStr := "false"
if removeLock {
- removeLockStr = "1"
+ removeLockStr = "true"
}
- return exec.Command("go", "run", "../../../scripts/lockfile/lockfile.go", lockPath, strconv.Itoa(sleep), removeLockStr)
+ cmd := exec.Command("go", "test", "-run", "TestAcquireAndReleaseFile")
+ cmd.Env = os.Environ()
+ cmd.Env = append(
+ cmd.Env,
+ "LOCKFILE_SUPERVISED_PROCESS=true",
+ fmt.Sprintf("WITH_LOCK_PATH=%s", lockPath),
+ fmt.Sprintf("WITH_SLEEP_DURATION=%s", sleepDuration),
+ fmt.Sprintf("WITH_REMOVE_LOCK=%s", removeLockStr),
+ )
+
+ return cmd
}
diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go
index 2adc9a52d1..31be18cfb0 100644
--- a/src/dbnode/server/server.go
+++ b/src/dbnode/server/server.go
@@ -18,6 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
+// Package server contains the code to run the dbnode server.
package server
import (
@@ -83,7 +84,6 @@ import (
xdocs "github.com/m3db/m3/src/x/docs"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
- "github.com/m3db/m3/src/x/lockfile"
"github.com/m3db/m3/src/x/mmap"
xos "github.com/m3db/m3/src/x/os"
"github.com/m3db/m3/src/x/pool"
@@ -167,7 +167,7 @@ func Run(runOpts RunOptions) {
cfg = runOpts.Config
}
- err := cfg.InitDefaultsAndValidate()
+ err := cfg.Validate()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
@@ -175,7 +175,7 @@ func Run(runOpts RunOptions) {
os.Exit(1)
}
- logger, err := cfg.Logging.BuildLogger()
+ logger, err := cfg.LoggingOrDefault().BuildLogger()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to black hole. Don't remove unless with good reason.
@@ -222,16 +222,17 @@ func Run(runOpts RunOptions) {
// file will remain on the file system. When a dbnode starts after an ungracefully stop,
// it will be able to acquire the lock despite the fact the the lock file exists.
lockPath := path.Join(cfg.Filesystem.FilePathPrefixOrDefault(), filePathPrefixLockFile)
- fslock, err := lockfile.CreateAndAcquire(lockPath, newDirectoryMode)
+ fslock, err := createAndAcquireLockfile(lockPath, newDirectoryMode)
if err != nil {
- logger.Fatal("could not acquire lock", zap.String("path", lockPath), zap.Error(err))
+ logger.Fatal("could not acqurie lock", zap.String("path", lockPath), zap.Error(err))
}
- defer fslock.Release()
+ // nolint: errcheck
+ defer fslock.releaseLockfile()
go bgValidateProcessLimits(logger)
- debug.SetGCPercent(cfg.GCPercentage)
+ debug.SetGCPercent(cfg.GCPercentageOrDefault())
- scope, _, err := cfg.Metrics.NewRootScope()
+ scope, _, err := cfg.MetricsOrDefault().NewRootScope()
if err != nil {
logger.Fatal("could not connect to metrics", zap.Error(err))
}
@@ -267,17 +268,22 @@ func Run(runOpts RunOptions) {
}
// Presence of KV server config indicates embedded etcd cluster
- if cfg.EnvironmentConfig.SeedNodes == nil {
+ envConfig, err := cfg.DiscoveryConfig.EnvironmentConfig(hostID)
+ if err != nil {
+ logger.Fatal("could not get env config from discovery config", zap.Error(err))
+ }
+
+ if envConfig.SeedNodes == nil {
logger.Info("no seed nodes set, using dedicated etcd cluster")
} else {
// Default etcd client clusters if not set already
- service, err := cfg.EnvironmentConfig.Services.SyncCluster()
+ service, err := envConfig.Services.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster configuration", zap.Error(err))
}
clusters := service.Service.ETCDClusters
- seedNodes := cfg.EnvironmentConfig.SeedNodes.InitialCluster
+ seedNodes := envConfig.SeedNodes.InitialCluster
if len(clusters) == 0 {
endpoints, err := config.InitialClusterEndpoints(seedNodes)
if err != nil {
@@ -330,7 +336,7 @@ func Run(runOpts RunOptions) {
// are constructed allowing for type to be picked
// by the caller using instrument.NewTimer(...).
timerOpts := instrument.NewHistogramTimerOptions(instrument.HistogramTimerOptions{})
- timerOpts.StandardSampleRate = cfg.Metrics.SampleRate()
+ timerOpts.StandardSampleRate = cfg.MetricsOrDefault().SampleRate()
var (
opts = storage.NewOptions()
@@ -392,8 +398,9 @@ func Run(runOpts RunOptions) {
SetLimitEnabled(true).
SetLimitMbps(cfg.Filesystem.ThroughputLimitMbpsOrDefault()).
SetLimitCheckEvery(cfg.Filesystem.ThroughputCheckEveryOrDefault())).
- SetWriteNewSeriesAsync(cfg.WriteNewSeriesAsync).
- SetWriteNewSeriesBackoffDuration(cfg.WriteNewSeriesBackoffDuration)
+ SetWriteNewSeriesAsync(cfg.WriteNewSeriesAsyncOrDefault()).
+ SetWriteNewSeriesBackoffDuration(cfg.WriteNewSeriesBackoffDurationOrDefault())
+
if lruCfg := cfg.Cache.SeriesConfiguration().LRU; lruCfg != nil {
runtimeOpts = runtimeOpts.SetMaxWiredBlocks(lruCfg.MaxBlocks)
}
@@ -440,7 +447,8 @@ func Run(runOpts RunOptions) {
// FOLLOWUP(prateek): remove this once we have the runtime options<->index wiring done
indexOpts := opts.IndexOptions()
insertMode := index.InsertSync
- if cfg.WriteNewSeriesAsync {
+
+ if cfg.WriteNewSeriesAsyncOrDefault() {
insertMode = index.InsertAsync
}
indexOpts = indexOpts.SetInsertMode(insertMode).
@@ -468,7 +476,11 @@ func Run(runOpts RunOptions) {
opts = opts.SetRuntimeOptionsManager(runtimeOptsMgr)
- policy := cfg.PoolingPolicy
+ policy, err := cfg.PoolingPolicyOrDefault()
+ if err != nil {
+ logger.Fatal("could not get pooling policy", zap.Error(err))
+ }
+
tagEncoderPool := serialize.NewTagEncoderPool(
serialize.NewTagEncoderOptions(),
poolOptions(
@@ -510,28 +522,29 @@ func Run(runOpts RunOptions) {
SetMmapReporter(mmapReporter)
var commitLogQueueSize int
- specified := cfg.CommitLog.Queue.Size
- switch cfg.CommitLog.Queue.CalculationType {
+ cfgCommitLog := cfg.CommitLogOrDefault()
+ specified := cfgCommitLog.Queue.Size
+ switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueSize = specified
case config.CalculationTypePerCPU:
commitLogQueueSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue size type",
- zap.Any("type", cfg.CommitLog.Queue.CalculationType))
+ zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
var commitLogQueueChannelSize int
- if cfg.CommitLog.QueueChannel != nil {
- specified := cfg.CommitLog.QueueChannel.Size
- switch cfg.CommitLog.Queue.CalculationType {
+ if cfgCommitLog.QueueChannel != nil {
+ specified := cfgCommitLog.QueueChannel.Size
+ switch cfgCommitLog.Queue.CalculationType {
case config.CalculationTypeFixed:
commitLogQueueChannelSize = specified
case config.CalculationTypePerCPU:
commitLogQueueChannelSize = specified * runtime.NumCPU()
default:
logger.Fatal("unknown commit log queue channel size type",
- zap.Any("type", cfg.CommitLog.Queue.CalculationType))
+ zap.Any("type", cfgCommitLog.Queue.CalculationType))
}
} else {
commitLogQueueChannelSize = int(float64(commitLogQueueSize) / commitlog.MaximumQueueSizeQueueChannelSizeRatio)
@@ -542,14 +555,18 @@ func Run(runOpts RunOptions) {
opts = opts.SetSeriesCachePolicy(seriesCachePolicy)
// Apply pooling options.
- opts = withEncodingAndPoolingOptions(cfg, logger, opts, cfg.PoolingPolicy)
+ poolingPolicy, err := cfg.PoolingPolicyOrDefault()
+ if err != nil {
+ logger.Fatal("could not get pooling policy", zap.Error(err))
+ }
+ opts = withEncodingAndPoolingOptions(cfg, logger, opts, poolingPolicy)
opts = opts.SetCommitLogOptions(opts.CommitLogOptions().
SetInstrumentOptions(opts.InstrumentOptions()).
SetFilesystemOptions(fsopts).
SetStrategy(commitlog.StrategyWriteBehind).
- SetFlushSize(cfg.CommitLog.FlushMaxBytes).
- SetFlushInterval(cfg.CommitLog.FlushEvery).
+ SetFlushSize(cfgCommitLog.FlushMaxBytes).
+ SetFlushInterval(cfgCommitLog.FlushEvery).
SetBacklogQueueSize(commitLogQueueSize).
SetBacklogQueueChannelSize(commitLogQueueChannelSize))
@@ -595,13 +612,17 @@ func Run(runOpts RunOptions) {
}
opts = opts.SetPersistManager(pm)
+ // Set the index claims manager.
+ icm := fs.NewIndexClaimsManager(fsopts)
+ opts = opts.SetIndexClaimsManager(icm)
+
var (
- envCfg environment.ConfigureResults
+ envCfgResults environment.ConfigureResults
)
- if len(cfg.EnvironmentConfig.Statics) == 0 {
+ if len(envConfig.Statics) == 0 {
logger.Info("creating dynamic config service client with m3cluster")
- envCfg, err = cfg.EnvironmentConfig.Configure(environment.ConfigurationParameters{
+ envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iopts,
HashingSeed: cfg.Hashing.Seed,
NewDirectoryMode: newDirectoryMode,
@@ -613,7 +634,7 @@ func Run(runOpts RunOptions) {
} else {
logger.Info("creating static config service client with m3cluster")
- envCfg, err = cfg.EnvironmentConfig.Configure(environment.ConfigurationParameters{
+ envCfgResults, err = envConfig.Configure(environment.ConfigurationParameters{
InstrumentOpts: iopts,
HostID: hostID,
ForceColdWritesEnabled: runOpts.StorageOptions.ForceColdWritesEnabled,
@@ -623,7 +644,7 @@ func Run(runOpts RunOptions) {
}
}
- syncCfg, err := envCfg.SyncCluster()
+ syncCfg, err := envCfgResults.SyncCluster()
if err != nil {
logger.Fatal("invalid cluster config", zap.Error(err))
}
@@ -663,36 +684,40 @@ func Run(runOpts RunOptions) {
if fn := runOpts.StorageOptions.TChanNodeServerFn; fn != nil {
tchanOpts = tchanOpts.SetTChanNodeServerFn(fn)
}
+
+ listenAddress := cfg.ListenAddressOrDefault()
tchannelthriftNodeClose, err := ttnode.NewServer(service,
- cfg.ListenAddress, contextPool, tchanOpts).ListenAndServe()
+ listenAddress, contextPool, tchanOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
- zap.String("address", cfg.ListenAddress), zap.Error(err))
+ zap.String("address", listenAddress), zap.Error(err))
}
defer tchannelthriftNodeClose()
- logger.Info("node tchannelthrift: listening", zap.String("address", cfg.ListenAddress))
+ logger.Info("node tchannelthrift: listening", zap.String("address", listenAddress))
+ httpListenAddress := cfg.HTTPNodeListenAddressOrDefault()
httpjsonNodeClose, err := hjnode.NewServer(service,
- cfg.HTTPNodeListenAddress, contextPool, nil).ListenAndServe()
+ httpListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
- zap.String("address", cfg.HTTPNodeListenAddress), zap.Error(err))
+ zap.String("address", httpListenAddress), zap.Error(err))
}
defer httpjsonNodeClose()
- logger.Info("node httpjson: listening", zap.String("address", cfg.HTTPNodeListenAddress))
+ logger.Info("node httpjson: listening", zap.String("address", httpListenAddress))
- if cfg.DebugListenAddress != "" {
+ debugListenAddress := cfg.DebugListenAddressOrDefault()
+ if debugListenAddress != "" {
var debugWriter xdebug.ZipWriter
handlerOpts, err := placement.NewHandlerOptions(syncCfg.ClusterClient,
queryconfig.Configuration{}, nil, iopts)
if err != nil {
logger.Warn("could not create handler options for debug writer", zap.Error(err))
} else {
- envCfg, err := cfg.EnvironmentConfig.Services.SyncCluster()
- if err != nil || envCfg.Service == nil {
+ envCfgCluster, err := envConfig.Services.SyncCluster()
+ if err != nil || envCfgCluster.Service == nil {
logger.Warn("could not get cluster config for debug writer",
zap.Error(err),
- zap.Bool("envCfgServiceIsNil", envCfg.Service == nil))
+ zap.Bool("envCfgClusterServiceIsNil", envCfgCluster.Service == nil))
} else {
debugWriter, err = xdebug.NewPlacementAndNamespaceZipWriterWithDefaultSources(
cpuProfileDuration,
@@ -702,8 +727,8 @@ func Run(runOpts RunOptions) {
{
ServiceName: handleroptions.M3DBServiceName,
Defaults: []handleroptions.ServiceOptionsDefault{
- handleroptions.WithDefaultServiceEnvironment(envCfg.Service.Env),
- handleroptions.WithDefaultServiceZone(envCfg.Service.Zone),
+ handleroptions.WithDefaultServiceEnvironment(envCfgCluster.Service.Env),
+ handleroptions.WithDefaultServiceZone(envCfgCluster.Service.Zone),
},
},
},
@@ -722,12 +747,12 @@ func Run(runOpts RunOptions) {
}
}
- if err := http.ListenAndServe(cfg.DebugListenAddress, mux); err != nil {
+ if err := http.ListenAndServe(debugListenAddress, mux); err != nil {
logger.Error("debug server could not listen",
- zap.String("address", cfg.DebugListenAddress), zap.Error(err))
+ zap.String("address", debugListenAddress), zap.Error(err))
} else {
logger.Info("debug server listening",
- zap.String("address", cfg.DebugListenAddress),
+ zap.String("address", debugListenAddress),
)
}
}()
@@ -863,23 +888,25 @@ func Run(runOpts RunOptions) {
opts = opts.SetBootstrapProcessProvider(bs)
// Start the cluster services now that the M3DB client is available.
+ clusterListenAddress := cfg.ClusterListenAddressOrDefault()
tchannelthriftClusterClose, err := ttcluster.NewServer(m3dbClient,
- cfg.ClusterListenAddress, contextPool, tchannelOpts).ListenAndServe()
+ clusterListenAddress, contextPool, tchannelOpts).ListenAndServe()
if err != nil {
logger.Fatal("could not open tchannelthrift interface",
- zap.String("address", cfg.ClusterListenAddress), zap.Error(err))
+ zap.String("address", clusterListenAddress), zap.Error(err))
}
defer tchannelthriftClusterClose()
- logger.Info("cluster tchannelthrift: listening", zap.String("address", cfg.ClusterListenAddress))
+ logger.Info("cluster tchannelthrift: listening", zap.String("address", clusterListenAddress))
+ httpClusterListenAddress := cfg.HTTPClusterListenAddressOrDefault()
httpjsonClusterClose, err := hjcluster.NewServer(m3dbClient,
- cfg.HTTPClusterListenAddress, contextPool, nil).ListenAndServe()
+ httpClusterListenAddress, contextPool, nil).ListenAndServe()
if err != nil {
logger.Fatal("could not open httpjson interface",
- zap.String("address", cfg.HTTPClusterListenAddress), zap.Error(err))
+ zap.String("address", httpClusterListenAddress), zap.Error(err))
}
defer httpjsonClusterClose()
- logger.Info("cluster httpjson: listening", zap.String("address", cfg.HTTPClusterListenAddress))
+ logger.Info("cluster httpjson: listening", zap.String("address", httpClusterListenAddress))
// Initialize clustered database.
clusterTopoWatch, err := topo.Watch()
@@ -926,7 +953,7 @@ func Run(runOpts RunOptions) {
// Only set the write new series limit after bootstrapping
kvWatchNewSeriesLimitPerShard(syncCfg.KVStore, logger, topo,
- runtimeOptsMgr, cfg.WriteNewSeriesLimitPerSecond)
+ runtimeOptsMgr, cfg.Limits.WriteNewSeriesPerSecond)
kvWatchEncodersPerBlockLimit(syncCfg.KVStore, logger,
runtimeOptsMgr, cfg.Limits.MaxEncodersPerBlock)
}()
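The commit log queue sizing above follows two rules that are easy to miss in the config plumbing; extracted as a sketch:

```go
package sketch

import "runtime"

// commitLogQueueSize mirrors the sizing rules above: "fixed" takes the
// configured size verbatim, "percpu" multiplies it by the number of
// CPUs on the host.
func commitLogQueueSize(specified int, calculationType string) int {
	if calculationType == "percpu" {
		return specified * runtime.NumCPU()
	}
	return specified // fixed
}
```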
diff --git a/src/dbnode/storage/block/wired_list.go b/src/dbnode/storage/block/wired_list.go
index e804188b88..ba2ff0c551 100644
--- a/src/dbnode/storage/block/wired_list.go
+++ b/src/dbnode/storage/block/wired_list.go
@@ -55,8 +55,8 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/runtime"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
"github.com/uber-go/tally"
diff --git a/src/dbnode/storage/block/wired_list_test.go b/src/dbnode/storage/block/wired_list_test.go
index 7a473ec0ce..9cc763e8bc 100644
--- a/src/dbnode/storage/block/wired_list_test.go
+++ b/src/dbnode/storage/block/wired_list_test.go
@@ -26,11 +26,11 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/bootstrap.go b/src/dbnode/storage/bootstrap.go
index 2c569aee3a..a154e09493 100644
--- a/src/dbnode/storage/bootstrap.go
+++ b/src/dbnode/storage/bootstrap.go
@@ -26,8 +26,8 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
index 89e4787aa2..5790b957d7 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/options.go
@@ -40,11 +40,12 @@ import (
)
var (
- errPersistManagerNotSet = errors.New("persist manager not set")
- errCompactorNotSet = errors.New("compactor not set")
- errIndexOptionsNotSet = errors.New("index options not set")
- errFilesystemOptionsNotSet = errors.New("filesystem options not set")
- errMigrationOptionsNotSet = errors.New("migration options not set")
+ errPersistManagerNotSet = errors.New("persist manager not set")
+ errIndexClaimsManagerNotSet = errors.New("index claims manager not set")
+ errCompactorNotSet = errors.New("compactor not set")
+ errIndexOptionsNotSet = errors.New("index options not set")
+ errFilesystemOptionsNotSet = errors.New("filesystem options not set")
+ errMigrationOptionsNotSet = errors.New("migration options not set")
// NB(r): Bootstrapping data doesn't use large amounts of memory
// that won't be released, so its fine to do this as fast as possible.
@@ -60,6 +61,9 @@ var (
// defaultIndexSegmentConcurrency defines the default index segment building concurrency.
defaultIndexSegmentConcurrency = 1
+
+ // defaultIndexSegmentsVerify defines default for index segments validation.
+ defaultIndexSegmentsVerify = false
)
type options struct {
@@ -68,8 +72,10 @@ type options struct {
fsOpts fs.Options
indexOpts index.Options
persistManager persist.Manager
+ indexClaimsManager fs.IndexClaimsManager
compactor *compaction.Compactor
indexSegmentConcurrency int
+ indexSegmentsVerify bool
runtimeOptsMgr runtime.OptionsManager
identifierPool ident.Pool
migrationOpts migration.Options
@@ -88,6 +94,7 @@ func NewOptions() Options {
instrumentOpts: instrument.NewOptions(),
resultOpts: result.NewOptions(),
indexSegmentConcurrency: defaultIndexSegmentConcurrency,
+ indexSegmentsVerify: defaultIndexSegmentsVerify,
runtimeOptsMgr: runtime.NewOptionsManager(),
identifierPool: idPool,
migrationOpts: migration.NewOptions(),
@@ -99,6 +106,9 @@ func (o *options) Validate() error {
if o.persistManager == nil {
return errPersistManagerNotSet
}
+ if o.indexClaimsManager == nil {
+ return errIndexClaimsManagerNotSet
+ }
if o.compactor == nil {
return errCompactorNotSet
}
@@ -170,6 +180,16 @@ func (o *options) PersistManager() persist.Manager {
return o.persistManager
}
+func (o *options) SetIndexClaimsManager(value fs.IndexClaimsManager) Options {
+ opts := *o
+ opts.indexClaimsManager = value
+ return &opts
+}
+
+func (o *options) IndexClaimsManager() fs.IndexClaimsManager {
+ return o.indexClaimsManager
+}
+
func (o *options) SetCompactor(value *compaction.Compactor) Options {
opts := *o
opts.compactor = value
@@ -190,6 +210,16 @@ func (o *options) IndexSegmentConcurrency() int {
return o.indexSegmentConcurrency
}
+func (o *options) SetIndexSegmentsVerify(value bool) Options {
+ opts := *o
+ opts.indexSegmentsVerify = value
+ return &opts
+}
+
+func (o *options) IndexSegmentsVerify() bool {
+ return o.indexSegmentsVerify
+}
+
func (o *options) SetRuntimeOptionsManager(value runtime.OptionsManager) Options {
opts := *o
opts.runtimeOptsMgr = value
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
index e72c05c1cd..a9a790d15f 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
@@ -25,7 +25,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
@@ -46,6 +45,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -329,9 +329,7 @@ func (s *fileSystemSource) bootstrapFromReaders(
persistManager *bootstrapper.SharedPersistManager,
compactor *bootstrapper.SharedCompactor,
) {
- var (
- resultOpts = s.opts.ResultOptions()
- )
+ resultOpts := s.opts.ResultOptions()
for timeWindowReaders := range readersCh {
// NB(bodu): Since we are re-using the same builder for all bootstrapped index blocks,
@@ -586,6 +584,7 @@ func (s *fileSystemSource) loadShardReadersDataIntoShardResult(
requestedRanges,
builder.Builder(),
persistManager,
+ s.opts.IndexClaimsManager(),
s.opts.ResultOptions(),
existingIndexBlock.Fulfilled(),
blockStart,
@@ -982,12 +981,22 @@ func (s *fileSystemSource) bootstrapFromIndexPersistedBlocks(
continue
}
+ fsOpts := s.fsopts
+ verify := s.opts.IndexSegmentsVerify()
+ if verify {
+ // Make sure this call to read index segments also
+ // validates the index segments.
+ // If validation fails, the fileset will be rebuilt since it is
+ // missing from the fulfilled range.
+ fsOpts = fsOpts.SetIndexReaderAutovalidateIndexSegments(true)
+ }
+
readResult, err := fs.ReadIndexSegments(fs.ReadIndexSegmentsOptions{
ReaderOptions: fs.IndexReaderOpenOptions{
Identifier: infoFile.ID,
FileSetType: persist.FileSetFlushType,
},
- FilesystemOptions: s.fsopts,
+ FilesystemOptions: fsOpts,
})
if err != nil {
s.log.Error("unable to read segments from index fileset",
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
index f67c75c82c..6c0c30b3a3 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source_data_test.go
@@ -95,11 +95,13 @@ func newTestOptions(t require.TestingT, filePathPrefix string) Options {
fsOpts := newTestFsOptions(filePathPrefix)
pm, err := fs.NewPersistManager(fsOpts)
require.NoError(t, err)
+ icm := fs.NewIndexClaimsManager(fsOpts)
return testDefaultOpts.
SetCompactor(compactor).
SetIndexOptions(idxOpts).
SetFilesystemOptions(fsOpts).
- SetPersistManager(pm)
+ SetPersistManager(pm).
+ SetIndexClaimsManager(icm)
}
func newTestOptionsWithPersistManager(t require.TestingT, filePathPrefix string) Options {
@@ -936,7 +938,8 @@ func TestReadRunMigrations(t *testing.T) {
writeGoodFilesWithFsOpts(t, testNs1ID, testShard, newTestFsOptions(dir).SetEncodingOptions(eOpts))
opts := newTestOptions(t, dir)
- sOpts, closer := newTestStorageOptions(t, opts.PersistManager())
+ icm := fs.NewIndexClaimsManager(opts.FilesystemOptions())
+ sOpts, closer := newTestStorageOptions(t, opts.PersistManager(), icm)
defer closer()
src, err := newFileSystemSource(opts.
@@ -949,7 +952,11 @@ func TestReadRunMigrations(t *testing.T) {
validateReadResults(t, src, dir, testShardTimeRanges())
}
-func newTestStorageOptions(t *testing.T, pm persist.Manager) (storage.Options, index.Closer) {
+func newTestStorageOptions(
+ t *testing.T,
+ pm persist.Manager,
+ icm fs.IndexClaimsManager,
+) (storage.Options, index.Closer) {
plCache, closer, err := index.NewPostingsListCache(1, index.PostingsListCacheOptions{
InstrumentOptions: instrument.NewOptions(),
})
@@ -960,6 +967,7 @@ func newTestStorageOptions(t *testing.T, pm persist.Manager) (storage.Options, i
return storage.NewOptions().
SetPersistManager(pm).
+ SetIndexClaimsManager(icm).
SetNamespaceInitializer(namespace.NewStaticInitializer([]namespace.Metadata{md})).
SetRepairEnabled(false).
SetIndexOptions(index.NewOptions().
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
index 778f133b52..253561338b 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/types.go
@@ -64,6 +64,13 @@ type Options interface {
// when performing a bootstrap run with persistence enabled.
PersistManager() persist.Manager
+ // SetIndexClaimsManager sets the index claims manager.
+ SetIndexClaimsManager(value fs.IndexClaimsManager) Options
+
+ // IndexClaimsManager returns the index claims manager. It's used to manage
+ // concurrent claims for volume indices per ns and block start.
+ IndexClaimsManager() fs.IndexClaimsManager
+
// SetCompactor sets the compactor used to compact segment builders into segments.
SetCompactor(value *compaction.Compactor) Options
@@ -78,6 +85,14 @@ type Options interface {
// building index segments.
IndexSegmentConcurrency() int
+ // SetIndexSegmentsVerify sets the value for whether to verify bootstrapped
+ // index segments.
+ SetIndexSegmentsVerify(value bool) Options
+
+ // IndexSegmentsVerify returns the value for whether to verify bootstrapped
+ // index segments.
+ IndexSegmentsVerify() bool
+
// SetRuntimeOptionsManager sets the runtime options manager.
SetRuntimeOptionsManager(value runtime.OptionsManager) Options
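Putting the new knobs together, enabling verification is a single option on the bootstrapper; a sketch (per the source.go hunk above, the source internally enables index reader autovalidation when this flag is set):

```go
package sketch

import (
	bfs "github.com/m3db/m3/src/dbnode/storage/bootstrap/bootstrapper/fs"
)

// withIndexSegmentVerification opts a filesystem bootstrap into
// validating persisted index segments as they are read; a fileset that
// fails validation is left unfulfilled and therefore rebuilt.
func withIndexSegmentVerification(opts bfs.Options) bfs.Options {
	// Internally the source flips SetIndexReaderAutovalidateIndexSegments(true)
	// on its fs options when this flag is enabled.
	return opts.SetIndexSegmentsVerify(true)
}
```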
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
index cf7856d7d2..882f4366e5 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/options.go
@@ -61,6 +61,7 @@ var (
var (
errAdminClientNotSet = errors.New("admin client not set")
errPersistManagerNotSet = errors.New("persist manager not set")
+ errIndexClaimsManagerNotSet = errors.New("index claims manager not set")
errCompactorNotSet = errors.New("compactor not set")
errIndexOptionsNotSet = errors.New("index options not set")
errFilesystemOptionsNotSet = errors.New("filesystem options not set")
@@ -76,6 +77,7 @@ type options struct {
indexSegmentConcurrency int
persistenceMaxQueueSize int
persistManager persist.Manager
+ indexClaimsManager fs.IndexClaimsManager
runtimeOptionsManager m3dbruntime.OptionsManager
contextPool context.Pool
fsOpts fs.Options
@@ -106,6 +108,9 @@ func (o *options) Validate() error {
if o.persistManager == nil {
return errPersistManagerNotSet
}
+ if o.indexClaimsManager == nil {
+ return errIndexClaimsManagerNotSet
+ }
if o.compactor == nil {
return errCompactorNotSet
}
@@ -204,6 +209,16 @@ func (o *options) PersistManager() persist.Manager {
return o.persistManager
}
+func (o *options) SetIndexClaimsManager(value fs.IndexClaimsManager) Options {
+ opts := *o
+ opts.indexClaimsManager = value
+ return &opts
+}
+
+func (o *options) IndexClaimsManager() fs.IndexClaimsManager {
+ return o.indexClaimsManager
+}
+
func (o *options) SetCompactor(value *compaction.Compactor) Options {
opts := *o
opts.compactor = value
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
index 0f2310c010..0f8bce23fb 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/peers_test.go
@@ -60,12 +60,15 @@ func TestNewPeersBootstrapper(t *testing.T) {
})
require.NoError(t, err)
+ fsOpts := fs.NewOptions()
+ icm := fs.NewIndexClaimsManager(fsOpts)
opts := NewOptions().
SetFilesystemOptions(fs.NewOptions()).
SetIndexOptions(idxOpts).
SetAdminClient(client.NewMockAdminClient(ctrl)).
SetPersistManager(persist.NewMockManager(ctrl)).
- SetFilesystemOptions(fs.NewOptions()).
+ SetIndexClaimsManager(icm).
+ SetFilesystemOptions(fsOpts).
SetCompactor(compactor).
SetRuntimeOptionsManager(runtime.NewMockOptionsManager(ctrl))
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
index 0ab602644e..e9768fb0f5 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
@@ -28,7 +28,6 @@ import (
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
@@ -45,10 +44,11 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -340,7 +340,7 @@ func (s *peersSource) startPersistenceQueueWorkerLoop(
persistFlush, bootstrapResult, lock)
}()
- return xclose.CloserFn(persistFlush.DoneFlush), nil
+ return xresource.CloserFn(persistFlush.DoneFlush), nil
}
// runPersistenceQueueWorkerLoop is meant to be run in its own goroutine, and it creates a worker that
@@ -929,6 +929,7 @@ func (s *peersSource) processReaders(
requestedRanges,
builder.Builder(),
persistManager,
+ s.opts.IndexClaimsManager(),
s.opts.ResultOptions(),
existingIndexBlock.Fulfilled(),
blockStart,
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
index d297c5a31e..b2baf0e1a1 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_data_test.go
@@ -98,11 +98,14 @@ func newTestDefaultOpts(t *testing.T, ctrl *gomock.Controller) Options {
},
})
require.NoError(t, err)
+ fsOpts := fs.NewOptions()
+ icm := fs.NewIndexClaimsManager(fsOpts)
return NewOptions().
SetResultOptions(testDefaultResultOpts).
SetPersistManager(persist.NewMockManager(ctrl)).
+ SetIndexClaimsManager(icm).
SetAdminClient(client.NewMockAdminClient(ctrl)).
- SetFilesystemOptions(fs.NewOptions()).
+ SetFilesystemOptions(fsOpts).
SetCompactor(compactor).
SetIndexOptions(idxOpts).
SetAdminClient(newValidMockClient(t, ctrl)).
@@ -623,7 +626,6 @@ func TestPeersSourceMarksUnfulfilledOnPersistenceErrors(t *testing.T) {
PrepareData(prepareOpts).
Return(persist.PreparedDataPersist{
Persist: func(metadata persist.Metadata, segment ts.Segment, checksum uint32) error {
- panic("wat")
assert.Fail(t, "not expecting to flush shard 0 at start + block size")
return nil
},
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
index 420b32655b..c2c75167e2 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source_index_test.go
@@ -157,6 +157,9 @@ func TestBootstrapIndex(t *testing.T) {
require.NoError(t, err)
opts = opts.SetPersistManager(pm)
+ icm := fs.NewIndexClaimsManager(opts.FilesystemOptions())
+ opts = opts.SetIndexClaimsManager(icm)
+
blockSize := 2 * time.Hour
indexBlockSize := 2 * blockSize
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
index 1ea5793a63..cdf518f334 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/types.go
@@ -104,6 +104,13 @@ type Options interface {
// when performing a bootstrap with persistence.
PersistManager() persist.Manager
+ // SetIndexClaimsManager sets the index claims manager.
+ SetIndexClaimsManager(value fs.IndexClaimsManager) Options
+
+ // IndexClaimsManager returns the index claims manager. It is used to manage
+ // concurrent claims for volume indices per namespace and block start.
+ IndexClaimsManager() fs.IndexClaimsManager
+
// SetCompactor sets the compactor used to compact segment builders into segments.
SetCompactor(value *compaction.Compactor) Options
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/persist.go b/src/dbnode/storage/bootstrap/bootstrapper/persist.go
index acb908c7f0..cb7219af19 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/persist.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/persist.go
@@ -27,6 +27,7 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
+ "github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -58,6 +59,7 @@ func PersistBootstrapIndexSegment(
requestedRanges result.ShardTimeRanges,
builder segment.DocumentsBuilder,
persistManager *SharedPersistManager,
+ indexClaimsManager fs.IndexClaimsManager,
resultOpts result.Options,
fulfilled result.ShardTimeRanges,
blockStart time.Time,
@@ -111,6 +113,7 @@ func PersistBootstrapIndexSegment(
shards,
builder,
persistManager,
+ indexClaimsManager,
requestedRanges,
expectedRanges,
fulfilled,
@@ -124,6 +127,7 @@ func persistBootstrapIndexSegment(
shards map[uint32]struct{},
builder segment.DocumentsBuilder,
persistManager *SharedPersistManager,
+ indexClaimsManager fs.IndexClaimsManager,
requestedRanges result.ShardTimeRanges,
expectedRanges result.ShardTimeRanges,
fulfilled result.ShardTimeRanges,
@@ -160,6 +164,14 @@ func persistBootstrapIndexSegment(
}
}()
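+ // Claim the next index fileset volume index for this namespace and block
+ // start so that concurrent bootstrappers persisting the same block each
+ // write to a distinct volume.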
+ volumeIndex, err := indexClaimsManager.ClaimNextIndexFileSetVolumeIndex(
+ ns,
+ blockStart,
+ )
+ if err != nil {
+ return result.IndexBlock{}, fmt.Errorf("failed to claim next index volume index: %w", err)
+ }
+
preparedPersist, err := flush.PrepareIndex(persist.IndexPrepareOptions{
NamespaceMetadata: ns,
BlockStart: blockStart,
@@ -167,6 +179,7 @@ func persistBootstrapIndexSegment(
Shards: shards,
// NB(bodu): Assume default volume type when persisted bootstrapped index data.
IndexVolumeType: idxpersist.DefaultIndexVolumeType,
+ VolumeIndex: volumeIndex,
})
if err != nil {
return result.IndexBlock{}, err
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/readers.go b/src/dbnode/storage/bootstrap/bootstrapper/readers.go
index bc158aaae7..d9c6cbacc2 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/readers.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/readers.go
@@ -24,12 +24,12 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
+ "github.com/m3db/m3/src/x/clock"
xtime "github.com/m3db/m3/src/x/time"
"github.com/opentracing/opentracing-go"
diff --git a/src/dbnode/storage/bootstrap/process.go b/src/dbnode/storage/bootstrap/process.go
index f28df53883..fb81692b74 100644
--- a/src/dbnode/storage/bootstrap/process.go
+++ b/src/dbnode/storage/bootstrap/process.go
@@ -25,7 +25,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
@@ -33,6 +32,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/tracepoint"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/storage/bootstrap/result/options.go b/src/dbnode/storage/bootstrap/result/options.go
index 0be030c398..531e972a8a 100644
--- a/src/dbnode/storage/bootstrap/result/options.go
+++ b/src/dbnode/storage/bootstrap/result/options.go
@@ -21,9 +21,9 @@
package result
import (
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
)
diff --git a/src/dbnode/storage/bootstrap/result/types.go b/src/dbnode/storage/bootstrap/result/types.go
index 423553ee22..f0df61f114 100644
--- a/src/dbnode/storage/bootstrap/result/types.go
+++ b/src/dbnode/storage/bootstrap/result/types.go
@@ -23,11 +23,11 @@ package result
import (
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/persist"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/storage/cleanup.go b/src/dbnode/storage/cleanup.go
index d91db51329..96dde48304 100644
--- a/src/dbnode/storage/cleanup.go
+++ b/src/dbnode/storage/cleanup.go
@@ -26,11 +26,11 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
diff --git a/src/dbnode/storage/database.go b/src/dbnode/storage/database.go
index 495387fe44..77e7fa4236 100644
--- a/src/dbnode/storage/database.go
+++ b/src/dbnode/storage/database.go
@@ -28,7 +28,6 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/persist/fs/wide"
@@ -41,6 +40,7 @@ import (
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
diff --git a/src/x/dice/dice.go b/src/dbnode/storage/dice.go
similarity index 85%
rename from src/x/dice/dice.go
rename to src/dbnode/storage/dice.go
index fde1a26385..710ee07e6c 100644
--- a/src/x/dice/dice.go
+++ b/src/dbnode/storage/dice.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package dice
+package storage
import (
"fmt"
@@ -26,17 +26,17 @@ import (
"github.com/MichaelTJones/pcg"
)
-// Dice is an interface that allows for random sampling.
-type Dice interface {
- // Rate returns the sampling rate of this Dice: a number in (0.0, 1.0].
+// dice is an interface that allows for random sampling.
+type dice interface {
+ // Rate returns the sampling rate of this dice: a number in (0.0, 1.0].
Rate() float64
// Roll returns whether the dice roll succeeded.
Roll() bool
}
-// NewDice constructs a new Dice based on a given success rate.
-func NewDice(rate float64) (Dice, error) {
+// newDice constructs a new dice based on a given success rate.
+func newDice(rate float64) (dice, error) {
if rate <= 0.0 || rate > 1.0 {
return nil, fmt.Errorf("invalid sample rate %f", rate)
}
diff --git a/src/x/dice/dice_test.go b/src/dbnode/storage/dice_test.go
similarity index 94%
rename from src/x/dice/dice_test.go
rename to src/dbnode/storage/dice_test.go
index 523c2b7410..ca018cc29b 100644
--- a/src/x/dice/dice_test.go
+++ b/src/dbnode/storage/dice_test.go
@@ -18,7 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-package dice
+package storage
import (
"testing"
@@ -28,17 +28,17 @@ import (
)
func TestDiceConstructor(t *testing.T) {
- dice, err := NewDice(0)
+ dice, err := newDice(0)
require.Error(t, err)
require.Nil(t, dice)
- dice, err = NewDice(2)
+ dice, err = newDice(2)
require.Error(t, err)
require.Nil(t, dice)
}
func TestDice(t *testing.T) {
- r, err := NewDice(1)
+ r, err := newDice(1)
require.NoError(t, err)
assert.Equal(t, float64(1.0), r.Rate())
diff --git a/src/dbnode/storage/flush.go b/src/dbnode/storage/flush.go
index 13b04d757b..d081579f32 100644
--- a/src/dbnode/storage/flush.go
+++ b/src/dbnode/storage/flush.go
@@ -26,10 +26,10 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
"github.com/m3db/m3/src/dbnode/retention"
+ "github.com/m3db/m3/src/x/clock"
xerrors "github.com/m3db/m3/src/x/errors"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/storage/forward_index_dice.go b/src/dbnode/storage/forward_index_dice.go
index 30ba4061aa..69c4b767fc 100644
--- a/src/dbnode/storage/forward_index_dice.go
+++ b/src/dbnode/storage/forward_index_dice.go
@@ -23,8 +23,6 @@ package storage
import (
"fmt"
"time"
-
- "github.com/m3db/m3/src/x/dice"
)
// forwardIndexDice is a die roll that adds a chance for incoming index writes
@@ -36,7 +34,7 @@ type forwardIndexDice struct {
blockSize time.Duration
forwardIndexThreshold time.Duration
- forwardIndexDice dice.Dice
+ forwardIndexDice dice
}
func newForwardIndexDice(
@@ -72,7 +70,7 @@ func newForwardIndexDice(
bufferFragment := float64(bufferFuture) * threshold
forwardIndexThreshold = blockSize - time.Duration(bufferFragment)
- dice, err := dice.NewDice(probability)
+ dice, err := newDice(probability)
if err != nil {
return forwardIndexDice{},
fmt.Errorf("cannot create forward write dice: %s", err)
diff --git a/src/dbnode/storage/fs.go b/src/dbnode/storage/fs.go
index 0fd7509982..e6363f75f0 100644
--- a/src/dbnode/storage/fs.go
+++ b/src/dbnode/storage/fs.go
@@ -60,8 +60,8 @@ type fileOpState struct {
// BlockLeaseVerifier needs to know that a higher cold flush version exists on disk so that
// it can approve the SeekerManager's request to open a lease on the latest version.
//
- // In other words ColdVersionRetrievabled is used to keep track of the latest cold version that has
- // been succesfully flushed and can be queried via the block retriever / seeker manager and
+ // In other words ColdVersionRetrievable is used to keep track of the latest cold version that has
+ // been successfully flushed and can be queried via the block retriever / seeker manager and
// as a result is safe to evict, while ColdVersionFlushed is used to keep track of the latest
// cold version that has been flushed and to validate lease requests from the SeekerManager when it
// receives a signal to open a new lease.
diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go
index 1aa73d162f..c811175b59 100644
--- a/src/dbnode/storage/index.go
+++ b/src/dbnode/storage/index.go
@@ -31,7 +31,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
@@ -53,13 +52,13 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
idxpersist "github.com/m3db/m3/src/m3ninx/persist"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xopentracing "github.com/m3db/m3/src/x/opentracing"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -117,8 +116,8 @@ type nsIndex struct {
logger *zap.Logger
opts Options
nsMetadata namespace.Metadata
- runtimeOptsListener xclose.SimpleCloser
- runtimeNsOptsListener xclose.SimpleCloser
+ runtimeOptsListener xresource.SimpleCloser
+ runtimeNsOptsListener xresource.SimpleCloser
resultsPool index.QueryResultsPool
aggregateResultsPool index.AggregateResultsPool
@@ -211,7 +210,7 @@ type newNamespaceIndexOpts struct {
// execBlockQueryFn executes a query against the given block whilst tracking state.
type execBlockQueryFn func(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
@@ -1132,6 +1131,14 @@ func (i *nsIndex) flushBlock(
allShards[shard.ID()] = struct{}{}
}
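+ // Claim the next index fileset volume index before preparing the flush so
+ // this block's segments are persisted to a fresh volume.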
+ volumeIndex, err := i.opts.IndexClaimsManager().ClaimNextIndexFileSetVolumeIndex(
+ i.nsMetadata,
+ indexBlock.StartTime(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to claim next index volume index: %w", err)
+ }
+
preparedPersist, err := flush.PrepareIndex(persist.IndexPrepareOptions{
NamespaceMetadata: i.nsMetadata,
BlockStart: indexBlock.StartTime(),
@@ -1139,6 +1146,7 @@ func (i *nsIndex) flushBlock(
Shards: allShards,
// NB(bodu): By default, we always write to the "default" index volume type.
IndexVolumeType: idxpersist.DefaultIndexVolumeType,
+ VolumeIndex: volumeIndex,
})
if err != nil {
return nil, err
@@ -1555,7 +1563,7 @@ func (i *nsIndex) queryWithSpan(
// Create a cancellable lifetime and cancel it at end of this method so that
// no child async task modifies the result after this method returns.
- cancellable := resource.NewCancellableLifetime()
+ cancellable := xresource.NewCancellableLifetime()
defer cancellable.Cancel()
for _, block := range blocks {
@@ -1663,7 +1671,7 @@ func (i *nsIndex) queryWithSpan(
func (i *nsIndex) execBlockQueryFn(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
@@ -1701,7 +1709,7 @@ func (i *nsIndex) execBlockQueryFn(
func (i *nsIndex) execBlockWideQueryFn(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
@@ -1745,7 +1753,7 @@ func (i *nsIndex) execBlockWideQueryFn(
func (i *nsIndex) execBlockAggregateQueryFn(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
diff --git a/src/dbnode/storage/index/block.go b/src/dbnode/storage/index/block.go
index f1f0e80b9f..120fe09a3d 100644
--- a/src/dbnode/storage/index/block.go
+++ b/src/dbnode/storage/index/block.go
@@ -43,7 +43,7 @@ import (
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
"github.com/opentracing/opentracing-go"
@@ -400,7 +400,7 @@ func (b *block) segmentReadersWithRLock() ([]segment.Reader, error) {
// to the results datastructure).
func (b *block) Query(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
query Query,
opts QueryOptions,
results BaseResults,
@@ -420,7 +420,7 @@ func (b *block) Query(
func (b *block) queryWithSpan(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
query Query,
opts QueryOptions,
results BaseResults,
@@ -465,7 +465,7 @@ func (b *block) queryWithSpan(
return false, errCancelledQuery
}
execCloseRegistered = true // Make sure to not locally close it.
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
b.closeAsync(exec)
}))
cancellable.ReleaseCheckout()
@@ -530,7 +530,7 @@ func (b *block) closeAsync(closer io.Closer) {
}
func (b *block) addQueryResults(
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
results BaseResults,
batch []doc.Document,
) ([]doc.Document, int, int, error) {
@@ -548,7 +548,7 @@ func (b *block) addQueryResults(
return batch, 0, 0, errCancelledQuery
}
- // try to add the docs to the resource.
+ // try to add the docs to the results.
size, docsCount, err := results.AddDocuments(batch)
// immediately release the checkout on the lifetime of query.
@@ -572,7 +572,7 @@ func (b *block) addQueryResults(
// pre-aggregated results via the FST underlying the index.
func (b *block) Aggregate(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
opts QueryOptions,
results AggregateResults,
logFields []opentracinglog.Field,
@@ -591,7 +591,7 @@ func (b *block) Aggregate(
func (b *block) aggregateWithSpan(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
opts QueryOptions,
results AggregateResults,
sp opentracing.Span,
@@ -667,7 +667,7 @@ func (b *block) aggregateWithSpan(
// read by the readers.
for _, reader := range readers {
reader := reader // Capture for inline function.
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
b.closeAsync(reader)
}))
}
@@ -792,7 +792,7 @@ func (b *block) pooledID(id []byte) ident.ID {
}
func (b *block) addAggregateResults(
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
results AggregateResults,
batch []AggregateResultsEntry,
) ([]AggregateResultsEntry, int, int, error) {
@@ -810,7 +810,7 @@ func (b *block) addAggregateResults(
return batch, 0, 0, errCancelledQuery
}
- // try to add the docs to the resource.
+ // try to add the fields to the results.
size, docsCount := results.AddFields(batch)
// immediately release the checkout on the lifetime of query.
diff --git a/src/dbnode/storage/index/block_prop_test.go b/src/dbnode/storage/index/block_prop_test.go
index 447ca2f12e..0624a3923a 100644
--- a/src/dbnode/storage/index/block_prop_test.go
+++ b/src/dbnode/storage/index/block_prop_test.go
@@ -40,7 +40,7 @@ import (
"github.com/m3db/m3/src/m3ninx/search/proptest"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/instrument"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/leanovate/gopter"
"github.com/leanovate/gopter/prop"
@@ -107,7 +107,7 @@ func TestPostingsListCacheDoesNotAffectBlockQueryResults(t *testing.T) {
idx.NewQueryFromSearchQuery(q),
}
- cancellable := resource.NewCancellableLifetime()
+ cancellable := xresource.NewCancellableLifetime()
cancelled := false
doneQuery := func() {
if !cancelled {
diff --git a/src/dbnode/storage/index/block_test.go b/src/dbnode/storage/index/block_test.go
index 5bb078b676..e8f4e64ee4 100644
--- a/src/dbnode/storage/index/block_test.go
+++ b/src/dbnode/storage/index/block_test.go
@@ -40,7 +40,7 @@ import (
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/pool"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
"github.com/golang/mock/gomock"
@@ -366,7 +366,7 @@ func TestBlockQueryAfterClose(t *testing.T) {
require.Equal(t, start.Add(time.Hour), b.EndTime())
require.NoError(t, b.Close())
- _, err = b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, nil, emptyLogFields)
require.Error(t, err)
}
@@ -382,7 +382,7 @@ func TestBlockQueryWithCancelledQuery(t *testing.T) {
require.Equal(t, start.Add(time.Hour), b.EndTime())
// Precancel query.
- cancellable := resource.NewCancellableLifetime()
+ cancellable := xresource.NewCancellableLifetime()
cancellable.Cancel()
_, err = b.Query(context.NewContext(), cancellable,
@@ -405,7 +405,7 @@ func TestBlockQueryExecutorError(t *testing.T) {
return nil, fmt.Errorf("random-err")
}
- _, err = b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, nil, emptyLogFields)
require.Error(t, err)
}
@@ -428,7 +428,7 @@ func TestBlockQuerySegmentReaderError(t *testing.T) {
randErr := fmt.Errorf("random-err")
seg.EXPECT().Reader().Return(nil, randErr)
- _, err = b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, nil, emptyLogFields)
require.Equal(t, randErr, err)
}
@@ -468,7 +468,7 @@ func TestBlockQueryAddResultsSegmentsError(t *testing.T) {
randErr := fmt.Errorf("random-err")
seg3.EXPECT().Reader().Return(nil, randErr)
- _, err = b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, nil, emptyLogFields)
require.Equal(t, randErr, err)
}
@@ -495,7 +495,7 @@ func TestBlockMockQueryExecutorExecError(t *testing.T) {
exec.EXPECT().Execute(gomock.Any()).Return(nil, fmt.Errorf("randomerr")),
exec.EXPECT().Close(),
)
- _, err = b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, nil, emptyLogFields)
require.Error(t, err)
}
@@ -531,7 +531,7 @@ func TestBlockMockQueryExecutorExecIterErr(t *testing.T) {
ctx := context.NewContext()
- _, err = b.Query(ctx, resource.NewCancellableLifetime(),
+ _, err = b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{},
NewQueryResults(nil, QueryResultsOptions{}, testOpts), emptyLogFields)
require.Error(t, err)
@@ -575,7 +575,7 @@ func TestBlockMockQueryExecutorExecLimit(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -622,7 +622,7 @@ func TestBlockMockQueryExecutorExecIterCloseErr(t *testing.T) {
ctx := context.NewContext()
- _, err = b.Query(ctx, resource.NewCancellableLifetime(),
+ _, err = b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, results, emptyLogFields)
require.Error(t, err)
@@ -664,7 +664,7 @@ func TestBlockMockQuerySeriesLimitNonExhaustive(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -715,7 +715,7 @@ func TestBlockMockQuerySeriesLimitExhaustive(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -766,7 +766,7 @@ func TestBlockMockQueryDocsLimitNonExhaustive(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{DocsLimit: docsLimit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -817,7 +817,7 @@ func TestBlockMockQueryDocsLimitExhaustive(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{DocsLimit: docsLimit}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -871,7 +871,7 @@ func TestBlockMockQueryMergeResultsMapLimit(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -926,7 +926,7 @@ func TestBlockMockQueryMergeResultsDupeID(t *testing.T) {
ctx := context.NewContext()
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
defaultQuery, QueryOptions{}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1395,7 +1395,7 @@ func TestBlockE2EInsertQuery(t *testing.T) {
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
Query{q}, QueryOptions{}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1471,7 +1471,7 @@ func TestBlockE2EInsertQueryLimit(t *testing.T) {
limit := 1
results := NewQueryResults(nil,
QueryResultsOptions{SizeLimit: limit}, testOpts)
- exhaustive, err := b.Query(context.NewContext(), resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(context.NewContext(), xresource.NewCancellableLifetime(),
Query{q}, QueryOptions{SeriesLimit: limit}, results, emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -1560,7 +1560,7 @@ func TestBlockE2EInsertAddResultsQuery(t *testing.T) {
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
Query{q}, QueryOptions{}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1639,7 +1639,7 @@ func TestBlockE2EInsertAddResultsMergeQuery(t *testing.T) {
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- exhaustive, err := b.Query(ctx, resource.NewCancellableLifetime(),
+ exhaustive, err := b.Query(ctx, xresource.NewCancellableLifetime(),
Query{q}, QueryOptions{}, results, emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1782,7 +1782,7 @@ func TestBlockAggregateAfterClose(t *testing.T) {
require.Equal(t, start.Add(time.Hour), b.EndTime())
require.NoError(t, b.Close())
- _, err = b.Aggregate(context.NewContext(), resource.NewCancellableLifetime(),
+ _, err = b.Aggregate(context.NewContext(), xresource.NewCancellableLifetime(),
QueryOptions{}, nil, emptyLogFields)
require.Error(t, err)
}
@@ -1829,7 +1829,12 @@ func TestBlockAggregateIterationErr(t *testing.T) {
ctx := context.NewContext()
defer ctx.BlockingClose()
- _, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 3}, results, emptyLogFields)
+ _, err = b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 3},
+ results,
+ emptyLogFields)
require.Error(t, err)
}
@@ -1885,7 +1890,12 @@ func TestBlockAggregate(t *testing.T) {
iter.EXPECT().Err().Return(nil),
iter.EXPECT().Close().Return(nil),
)
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 3}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 3},
+ results,
+ emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
@@ -1956,7 +1966,12 @@ func TestBlockAggregateNotExhaustive(t *testing.T) {
iter.EXPECT().Err().Return(nil),
iter.EXPECT().Close().Return(nil),
)
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 1}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 1},
+ results,
+ emptyLogFields)
require.NoError(t, err)
require.False(t, exhaustive)
@@ -2043,7 +2058,12 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
sp := mtr.StartSpan("root")
ctx.SetGoContext(opentracing.ContextWithSpan(stdlibctx.Background(), sp))
- exhaustive, err := b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
+ exhaustive, err := b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 10},
+ results,
+ emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{
@@ -2056,7 +2076,12 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
Type: AggregateTagNamesAndValues,
FieldFilter: AggregateFieldFilter{[]byte("bar")},
}, testOpts)
- exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
+ exhaustive, err = b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 10},
+ results,
+ emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{
@@ -2068,7 +2093,12 @@ func TestBlockE2EInsertAggregate(t *testing.T) {
Type: AggregateTagNamesAndValues,
FieldFilter: AggregateFieldFilter{[]byte("random")},
}, testOpts)
- exhaustive, err = b.Aggregate(ctx, resource.NewCancellableLifetime(), QueryOptions{SeriesLimit: 10}, results, emptyLogFields)
+ exhaustive, err = b.Aggregate(
+ ctx,
+ xresource.NewCancellableLifetime(),
+ QueryOptions{SeriesLimit: 10},
+ results,
+ emptyLogFields)
require.NoError(t, err)
require.True(t, exhaustive)
assertAggregateResultsMapEquals(t, map[string][]string{}, results)
diff --git a/src/dbnode/storage/index/index_mock.go b/src/dbnode/storage/index/index_mock.go
index 2aede296a2..d98d3c231b 100644
--- a/src/dbnode/storage/index/index_mock.go
+++ b/src/dbnode/storage/index/index_mock.go
@@ -28,7 +28,6 @@ import (
"reflect"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
"github.com/m3db/m3/src/dbnode/storage/limits"
@@ -37,6 +36,7 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/index/mutable_segments.go b/src/dbnode/storage/index/mutable_segments.go
index baa904cd7b..256bb4a313 100644
--- a/src/dbnode/storage/index/mutable_segments.go
+++ b/src/dbnode/storage/index/mutable_segments.go
@@ -35,10 +35,10 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/uber-go/tally"
"go.uber.org/zap"
@@ -73,7 +73,7 @@ type mutableSegments struct {
blockOpts BlockOptions
opts Options
iopts instrument.Options
- optsListener xclose.SimpleCloser
+ optsListener xresource.SimpleCloser
writeIndexingConcurrency int
metrics mutableSegmentsMetrics
diff --git a/src/dbnode/storage/index/options.go b/src/dbnode/storage/index/options.go
index ba5243272e..3ff3e9dd1e 100644
--- a/src/dbnode/storage/index/options.go
+++ b/src/dbnode/storage/index/options.go
@@ -23,13 +23,13 @@ package index
import (
"errors"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
"github.com/m3db/m3/src/dbnode/storage/limits"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
diff --git a/src/dbnode/storage/index/segments.go b/src/dbnode/storage/index/segments.go
index 837b9fc325..ce3d8ae5b3 100644
--- a/src/dbnode/storage/index/segments.go
+++ b/src/dbnode/storage/index/segments.go
@@ -23,8 +23,8 @@ package index
import (
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/m3ninx/index/segment"
+ "github.com/m3db/m3/src/x/clock"
)
type readableSeg struct {
diff --git a/src/dbnode/storage/index/types.go b/src/dbnode/storage/index/types.go
index 88323e7909..78b2cbc1ea 100644
--- a/src/dbnode/storage/index/types.go
+++ b/src/dbnode/storage/index/types.go
@@ -25,7 +25,6 @@ import (
"sort"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/storage/bootstrap/result"
"github.com/m3db/m3/src/dbnode/storage/index/compaction"
@@ -36,12 +35,13 @@ import (
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
"github.com/m3db/m3/src/m3ninx/index/segment/fst"
"github.com/m3db/m3/src/m3ninx/index/segment/mem"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/mmap"
"github.com/m3db/m3/src/x/pool"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
opentracinglog "github.com/opentracing/opentracing-go/log"
@@ -352,7 +352,7 @@ type Block interface {
// Query resolves the given query into known IDs.
Query(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
query Query,
opts QueryOptions,
results BaseResults,
@@ -364,7 +364,7 @@ type Block interface {
// avoid going to documents, relying purely on the indexed FSTs.
Aggregate(
ctx context.Context,
- cancellable *resource.CancellableLifetime,
+ cancellable *xresource.CancellableLifetime,
opts QueryOptions,
results AggregateResults,
logFields []opentracinglog.Field,
diff --git a/src/dbnode/storage/index_insert_queue.go b/src/dbnode/storage/index_insert_queue.go
index f3edbbc7c8..b168b99879 100644
--- a/src/dbnode/storage/index_insert_queue.go
+++ b/src/dbnode/storage/index_insert_queue.go
@@ -26,10 +26,10 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/clock"
xsync "github.com/m3db/m3/src/x/sync"
"github.com/uber-go/tally"
diff --git a/src/dbnode/storage/index_query_concurrent_test.go b/src/dbnode/storage/index_query_concurrent_test.go
index f7f1172367..0c41e5df02 100644
--- a/src/dbnode/storage/index_query_concurrent_test.go
+++ b/src/dbnode/storage/index_query_concurrent_test.go
@@ -34,7 +34,7 @@ import (
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
"github.com/m3db/m3/src/x/context"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
xsync "github.com/m3db/m3/src/x/sync"
xtest "github.com/m3db/m3/src/x/test"
"go.uber.org/zap"
@@ -232,7 +232,7 @@ func testNamespaceIndexHighConcurrentQueries(
Query(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(
_ context.Context,
- _ *resource.CancellableLifetime,
+ _ *xresource.CancellableLifetime,
_ index.Query,
_ index.QueryOptions,
_ index.QueryResults,
@@ -246,7 +246,7 @@ func testNamespaceIndexHighConcurrentQueries(
Query(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
DoAndReturn(func(
ctx context.Context,
- c *resource.CancellableLifetime,
+ c *xresource.CancellableLifetime,
q index.Query,
opts index.QueryOptions,
r index.QueryResults,
diff --git a/src/dbnode/storage/index_queue_forward_write_test.go b/src/dbnode/storage/index_queue_forward_write_test.go
index dcc096b882..15804cf2b4 100644
--- a/src/dbnode/storage/index_queue_forward_write_test.go
+++ b/src/dbnode/storage/index_queue_forward_write_test.go
@@ -26,7 +26,6 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/index"
@@ -35,7 +34,7 @@ import (
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
"github.com/m3db/m3/src/m3ninx/doc"
m3ninxidx "github.com/m3db/m3/src/m3ninx/idx"
- xclock "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
@@ -494,7 +493,7 @@ func verifyShard(
next time.Time,
id string,
) {
- allQueriesSuccess := xclock.WaitUntil(func() bool {
+ allQueriesSuccess := clock.WaitUntil(func() bool {
query := m3ninxidx.NewFieldQuery([]byte(id))
// check current index block for series
res, err := idx.Query(ctx, index.Query{Query: query}, index.QueryOptions{
diff --git a/src/dbnode/storage/index_queue_test.go b/src/dbnode/storage/index_queue_test.go
index 4fe044bebf..1b1c3e2e4c 100644
--- a/src/dbnode/storage/index_queue_test.go
+++ b/src/dbnode/storage/index_queue_test.go
@@ -26,12 +26,12 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
m3dberrors "github.com/m3db/m3/src/dbnode/storage/errors"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/m3ninx/doc"
m3ninxidx "github.com/m3db/m3/src/m3ninx/idx"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtest "github.com/m3db/m3/src/x/test"
diff --git a/src/dbnode/storage/mediator.go b/src/dbnode/storage/mediator.go
index b0afb18896..efe70159e7 100644
--- a/src/dbnode/storage/mediator.go
+++ b/src/dbnode/storage/mediator.go
@@ -25,9 +25,9 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/persist/fs"
"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
"github.com/uber-go/tally"
diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go
index 6465df365d..5367b17574 100644
--- a/src/dbnode/storage/namespace.go
+++ b/src/dbnode/storage/namespace.go
@@ -28,7 +28,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/fs"
@@ -43,12 +42,13 @@ import (
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
xopentracing "github.com/m3db/m3/src/x/opentracing"
+ xresource "github.com/m3db/m3/src/x/resource"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
@@ -122,7 +122,7 @@ type dbNamespace struct {
// schemaDescr caches the latest schema for the namespace.
// schemaDescr is updated whenever schema registry is updated.
- schemaListener xclose.SimpleCloser
+ schemaListener xresource.SimpleCloser
schemaDescr namespace.SchemaDescr
// Contains an entry to all shards for fast shard lookup, an
diff --git a/src/dbnode/storage/options.go b/src/dbnode/storage/options.go
index 3beba1a5a4..999de9a99e 100644
--- a/src/dbnode/storage/options.go
+++ b/src/dbnode/storage/options.go
@@ -29,7 +29,6 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/namespace"
@@ -46,6 +45,7 @@ import (
"github.com/m3db/m3/src/dbnode/ts/writes"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -98,6 +98,7 @@ var (
errRepairOptionsNotSet = errors.New("repair enabled but repair options are not set")
errIndexOptionsNotSet = errors.New("index enabled but index options are not set")
errPersistManagerNotSet = errors.New("persist manager is not set")
+ errIndexClaimsManagerNotSet = errors.New("index claims manager is not set")
errBlockLeaserNotSet = errors.New("block leaser is not set")
errOnColdFlushNotSet = errors.New("on cold flush is not set, requires at least a no-op implementation")
)
@@ -142,6 +143,7 @@ type options struct {
newDecoderFn encoding.NewDecoderFn
bootstrapProcessProvider bootstrap.ProcessProvider
persistManager persist.Manager
+ indexClaimsManager fs.IndexClaimsManager
blockRetrieverManager block.DatabaseBlockRetrieverManager
poolOpts pool.ObjectPoolOptions
contextPool context.Pool
@@ -293,6 +295,11 @@ func (o *options) Validate() error {
return errPersistManagerNotSet
}
+ // validate that index claims manager is present
+ if o.indexClaimsManager == nil {
+ return errIndexClaimsManagerNotSet
+ }
+
// validate series cache policy
if err := series.ValidateCachePolicy(o.seriesCachePolicy); err != nil {
return err
@@ -550,6 +557,16 @@ func (o *options) PersistManager() persist.Manager {
return o.persistManager
}
+func (o *options) SetIndexClaimsManager(value fs.IndexClaimsManager) Options {
+ opts := *o
+ opts.indexClaimsManager = value
+ return &opts
+}
+
+func (o *options) IndexClaimsManager() fs.IndexClaimsManager {
+ return o.indexClaimsManager
+}
+
func (o *options) SetDatabaseBlockRetrieverManager(value block.DatabaseBlockRetrieverManager) Options {
opts := *o
opts.blockRetrieverManager = value
diff --git a/src/dbnode/storage/repair.go b/src/dbnode/storage/repair.go
index 8f6eb523b2..8c727a6990 100644
--- a/src/dbnode/storage/repair.go
+++ b/src/dbnode/storage/repair.go
@@ -31,7 +31,6 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/retention"
"github.com/m3db/m3/src/dbnode/storage/block"
@@ -39,8 +38,8 @@ import (
"github.com/m3db/m3/src/dbnode/storage/repair"
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
- "github.com/m3db/m3/src/x/dice"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -768,7 +767,7 @@ func (r shardRepairer) shadowCompare(
shard databaseShard,
nsCtx namespace.Context,
) error {
- dice, err := dice.NewDice(r.rpopts.DebugShadowComparisonsPercentage())
+ dice, err := newDice(r.rpopts.DebugShadowComparisonsPercentage())
if err != nil {
return fmt.Errorf("err creating shadow comparison dice: %v", err)
}
diff --git a/src/dbnode/storage/series/buffer.go b/src/dbnode/storage/series/buffer.go
index 1e7ed84451..165cf53bb8 100644
--- a/src/dbnode/storage/series/buffer.go
+++ b/src/dbnode/storage/series/buffer.go
@@ -27,13 +27,13 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
diff --git a/src/dbnode/storage/series/lookup/entry.go b/src/dbnode/storage/series/lookup/entry.go
index 8e1917524e..a1319af9c7 100644
--- a/src/dbnode/storage/series/lookup/entry.go
+++ b/src/dbnode/storage/series/lookup/entry.go
@@ -25,12 +25,12 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/bootstrap"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/ts/writes"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xtime "github.com/m3db/m3/src/x/time"
)
diff --git a/src/dbnode/storage/series/options.go b/src/dbnode/storage/series/options.go
index 682b432e28..155749c03a 100644
--- a/src/dbnode/storage/series/options.go
+++ b/src/dbnode/storage/series/options.go
@@ -21,11 +21,11 @@
package series
import (
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/retention"
m3dbruntime "github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/series/series_test.go b/src/dbnode/storage/series/series_test.go
index 7d9d918f5a..3d9441e923 100644
--- a/src/dbnode/storage/series/series_test.go
+++ b/src/dbnode/storage/series/series_test.go
@@ -27,7 +27,6 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/persist"
@@ -37,6 +36,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/dbnode/x/xio"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
diff --git a/src/dbnode/storage/series/types.go b/src/dbnode/storage/series/types.go
index cf434c8e30..439393e8c4 100644
--- a/src/dbnode/storage/series/types.go
+++ b/src/dbnode/storage/series/types.go
@@ -23,7 +23,6 @@ package series
import (
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -33,6 +32,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/series_wired_list_interaction_test.go b/src/dbnode/storage/series_wired_list_interaction_test.go
index b446bfa5aa..4a67f9a657 100644
--- a/src/dbnode/storage/series_wired_list_interaction_test.go
+++ b/src/dbnode/storage/series_wired_list_interaction_test.go
@@ -25,13 +25,13 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/block"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
"github.com/m3db/m3/src/dbnode/ts"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go
index 292a5dfb05..49f8642757 100644
--- a/src/dbnode/storage/shard.go
+++ b/src/dbnode/storage/shard.go
@@ -29,7 +29,6 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/encoding/tile"
"github.com/m3db/m3/src/dbnode/generated/proto/annotation"
@@ -54,11 +53,12 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/x/checked"
- xclose "github.com/m3db/m3/src/x/close"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
"github.com/gogo/protobuf/proto"
@@ -185,7 +185,7 @@ type dbShard struct {
contextPool context.Pool
flushState shardFlushState
tickWg *sync.WaitGroup
- runtimeOptsListenClosers []xclose.SimpleCloser
+ runtimeOptsListenClosers []xresource.SimpleCloser
currRuntimeOptions dbShardRuntimeOptions
logger *zap.Logger
metrics dbShardMetrics
@@ -2850,7 +2850,10 @@ func (s *dbShard) AggregateTiles(
// Notify all block leasers that a new volume for the namespace/shard/blockstart
// has been created. This will block until all leasers have relinquished their
// leases.
- if err = s.finishWriting(opts.Start, nextVolume); err != nil {
+ // NB: markWarmFlushStateSuccess=true because no warm flush happens in this
+ // flow and WarmStatus must be set to fileOpSuccess explicitly to make the
+ // new blocks readable.
+ if err = s.finishWriting(opts.Start, nextVolume, true); err != nil {
multiErr = multiErr.Add(err)
}
}
@@ -2999,14 +3002,22 @@ func (s *dbShard) logFlushResult(r dbShardFlushResult) {
)
}
-func (s *dbShard) finishWriting(startTime time.Time, nextVersion int) error {
+func (s *dbShard) finishWriting(
+ blockStart time.Time,
+ nextVersion int,
+ markWarmFlushStateSuccess bool,
+) error {
+ if markWarmFlushStateSuccess {
+ s.markWarmFlushStateSuccess(blockStart)
+ }
+
// After writing the full block successfully update the ColdVersionFlushed number. This will
// allow the SeekerManager to open a lease on the latest version of the fileset files because
// the BlockLeaseVerifier will check the ColdVersionFlushed value, but the buffer only looks at
// ColdVersionRetrievable so a concurrent tick will not yet cause the blocks in memory to be
// evicted (which is the desired behavior because we haven't updated the open leases yet which
// means the newly written data is not available for querying via the SeekerManager yet.)
- s.setFlushStateColdVersionFlushed(startTime, nextVersion)
+ s.setFlushStateColdVersionFlushed(blockStart, nextVersion)
// Notify all block leasers that a new volume for the namespace/shard/blockstart
// has been created. This will block until all leasers have relinquished their
@@ -3014,7 +3025,7 @@ func (s *dbShard) finishWriting(startTime time.Time, nextVersion int) error {
_, err := s.opts.BlockLeaseManager().UpdateOpenLeases(block.LeaseDescriptor{
Namespace: s.namespace.ID(),
Shard: s.ID(),
- BlockStart: startTime,
+ BlockStart: blockStart,
}, block.LeaseState{Volume: nextVersion})
// After writing the full block successfully **and** propagating the new lease to the
// BlockLeaseManager, update the ColdVersionRetrievable in the flush state. Once this function
@@ -3026,13 +3037,13 @@ func (s *dbShard) finishWriting(startTime time.Time, nextVersion int) error {
// succeeded, but that would allow the ColdVersionRetrievable and ColdVersionFlushed numbers to drift
// which would increase the complexity of the code to address a situation that is probably not
// recoverable (failure to UpdateOpenLeases is an invariant violated error).
- s.setFlushStateColdVersionRetrievable(startTime, nextVersion)
+ s.setFlushStateColdVersionRetrievable(blockStart, nextVersion)
if err != nil {
instrument.EmitAndLogInvariantViolation(s.opts.InstrumentOptions(), func(l *zap.Logger) {
l.With(
zap.String("namespace", s.namespace.ID().String()),
zap.Uint32("shard", s.ID()),
- zap.Time("blockStart", startTime),
+ zap.Time("blockStart", blockStart),
zap.Int("nextVersion", nextVersion),
).Error("failed to update open leases after updating flush state cold version")
})
@@ -3063,7 +3074,7 @@ func (s shardColdFlush) Done() error {
continue
}
- err := s.shard.finishWriting(startTime, nextVersion)
+ err := s.shard.finishWriting(startTime, nextVersion, false)
if err != nil {
multiErr = multiErr.Add(err)
}
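
For readers following the `finishWriting` change above: a minimal sketch of the ordering invariant the comments describe, using simplified stand-in types (the real shard tracks far more state than this):

```go
package main

import "fmt"

// flushState models the two cold-version fields the shard tracks per block.
// The names mirror the diff's setFlushStateColdVersionFlushed/Retrievable
// helpers; everything else here is a simplified stand-in.
type flushState struct {
	coldVersionFlushed     int
	coldVersionRetrievable int
}

// finishWriting sketches the ordering invariant from dbShard.finishWriting:
// 1) bump ColdVersionFlushed so seekers may open leases on the new volume,
// 2) propagate the new lease to all leasers (blocking),
// 3) only then bump ColdVersionRetrievable so in-memory blocks can be evicted.
func finishWriting(fs *flushState, nextVersion int, updateOpenLeases func() error) error {
	fs.coldVersionFlushed = nextVersion
	err := updateOpenLeases()
	// Retrievable is advanced even on error so the two versions do not drift.
	fs.coldVersionRetrievable = nextVersion
	return err
}

func main() {
	fs := &flushState{}
	_ = finishWriting(fs, 1, func() error {
		fmt.Println("notifying block leasers of new volume")
		return nil
	})
	fmt.Printf("flushed=%d retrievable=%d\n", fs.coldVersionFlushed, fs.coldVersionRetrievable)
}
```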
diff --git a/src/dbnode/storage/shard_insert_queue.go b/src/dbnode/storage/shard_insert_queue.go
index fe66bfd45a..794b9848a3 100644
--- a/src/dbnode/storage/shard_insert_queue.go
+++ b/src/dbnode/storage/shard_insert_queue.go
@@ -26,13 +26,13 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/series"
"github.com/m3db/m3/src/dbnode/storage/series/lookup"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/checked"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/ident"
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
diff --git a/src/dbnode/storage/shard_ref_count_test.go b/src/dbnode/storage/shard_ref_count_test.go
index 1ca60a44ae..517848dc81 100644
--- a/src/dbnode/storage/shard_ref_count_test.go
+++ b/src/dbnode/storage/shard_ref_count_test.go
@@ -27,13 +27,12 @@ import (
"github.com/fortytw2/leaktest"
"github.com/golang/mock/gomock"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/runtime"
"github.com/m3db/m3/src/dbnode/storage/index"
"github.com/m3db/m3/src/dbnode/storage/series"
xmetrics "github.com/m3db/m3/src/dbnode/x/metrics"
- xclock "github.com/m3db/m3/src/x/clock"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
xtime "github.com/m3db/m3/src/x/time"
@@ -270,7 +269,7 @@ func TestShardWriteAsyncRefCount(t *testing.T) {
assert.NoError(t, err)
assert.True(t, seriesWrite.WasWritten)
- inserted := xclock.WaitUntil(func() bool {
+ inserted := clock.WaitUntil(func() bool {
counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"]
return ok && counter == 3
}, 2*time.Second)
@@ -436,7 +435,7 @@ func testShardWriteTaggedAsyncRefCount(t *testing.T, idx NamespaceIndex, nowFn f
seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexSuccess(idx.BlockStartForWriteTime(now))
seriesWrite.PendingIndexInsert.Entry.OnIndexSeries.OnIndexFinalize(idx.BlockStartForWriteTime(now))
- inserted := xclock.WaitUntil(func() bool {
+ inserted := clock.WaitUntil(func() bool {
counter, ok := testReporter.Counters()["dbshard.insert-queue.inserts"]
return ok && counter == 3
}, 5*time.Second)
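
The tests above poll with `clock.WaitUntil`; a self-contained sketch of that polling pattern follows, with an assumed 10ms poll interval (the real helper lives in `src/x/clock`):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// waitUntil polls fn until it returns true or the timeout elapses, mirroring
// the behavior the tests rely on from clock.WaitUntil. The poll interval is
// an assumption for illustration.
func waitUntil(fn func() bool, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if fn() {
			return true
		}
		time.Sleep(10 * time.Millisecond)
	}
	return fn()
}

func main() {
	var inserts int64
	go func() {
		time.Sleep(50 * time.Millisecond)
		atomic.AddInt64(&inserts, 3)
	}()
	ok := waitUntil(func() bool {
		return atomic.LoadInt64(&inserts) == 3
	}, 2*time.Second)
	fmt.Println("inserted:", ok)
}
```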
diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go
index feb8fbef7f..b11da30f9c 100644
--- a/src/dbnode/storage/storage_mock.go
+++ b/src/dbnode/storage/storage_mock.go
@@ -30,7 +30,6 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -49,6 +48,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -4124,6 +4124,34 @@ func (mr *MockOptionsMockRecorder) PersistManager() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PersistManager", reflect.TypeOf((*MockOptions)(nil).PersistManager))
}
+// SetIndexClaimsManager mocks base method
+func (m *MockOptions) SetIndexClaimsManager(value fs.IndexClaimsManager) Options {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetIndexClaimsManager", value)
+ ret0, _ := ret[0].(Options)
+ return ret0
+}
+
+// SetIndexClaimsManager indicates an expected call of SetIndexClaimsManager
+func (mr *MockOptionsMockRecorder) SetIndexClaimsManager(value interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexClaimsManager", reflect.TypeOf((*MockOptions)(nil).SetIndexClaimsManager), value)
+}
+
+// IndexClaimsManager mocks base method
+func (m *MockOptions) IndexClaimsManager() fs.IndexClaimsManager {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IndexClaimsManager")
+ ret0, _ := ret[0].(fs.IndexClaimsManager)
+ return ret0
+}
+
+// IndexClaimsManager indicates an expected call of IndexClaimsManager
+func (mr *MockOptionsMockRecorder) IndexClaimsManager() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexClaimsManager", reflect.TypeOf((*MockOptions)(nil).IndexClaimsManager))
+}
+
// SetDatabaseBlockRetrieverManager mocks base method
func (m *MockOptions) SetDatabaseBlockRetrieverManager(value block.DatabaseBlockRetrieverManager) Options {
m.ctrl.T.Helper()
diff --git a/src/dbnode/storage/tick.go b/src/dbnode/storage/tick.go
index 33585e8517..8a95d2f54b 100644
--- a/src/dbnode/storage/tick.go
+++ b/src/dbnode/storage/tick.go
@@ -25,8 +25,8 @@ import (
"sync"
"time"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/runtime"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
xerrors "github.com/m3db/m3/src/x/errors"
diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go
index b7753bc9f8..185ac9897e 100644
--- a/src/dbnode/storage/types.go
+++ b/src/dbnode/storage/types.go
@@ -26,7 +26,6 @@ import (
"time"
"github.com/m3db/m3/src/dbnode/client"
- "github.com/m3db/m3/src/dbnode/clock"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/persist"
@@ -47,6 +46,7 @@ import (
"github.com/m3db/m3/src/dbnode/x/xio"
"github.com/m3db/m3/src/dbnode/x/xpool"
"github.com/m3db/m3/src/m3ninx/doc"
+ "github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/context"
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
@@ -1125,6 +1125,12 @@ type Options interface {
// PersistManager returns the persistence manager.
PersistManager() persist.Manager
+ // SetIndexClaimsManager sets the index claims manager.
+ SetIndexClaimsManager(value fs.IndexClaimsManager) Options
+
+ // IndexClaimsManager returns the index claims manager.
+ IndexClaimsManager() fs.IndexClaimsManager
+
// SetDatabaseBlockRetrieverManager sets the block retriever manager to
// use when bootstrapping retrievable blocks instead of blocks
// containing data.
diff --git a/src/dbnode/storage/util.go b/src/dbnode/storage/util.go
index 335e67d1c4..527e562ab8 100644
--- a/src/dbnode/storage/util.go
+++ b/src/dbnode/storage/util.go
@@ -80,7 +80,11 @@ func DefaultTestOptions() Options {
SetBlockLeaseManager(blockLeaseManager)
})
- return defaultTestOptions
+ // Needs a unique index claims manager each time, as it tracks volume indices via in-memory
+ // claims that must be distinct per test.
+ fsOpts := defaultTestOptions.CommitLogOptions().FilesystemOptions()
+ icm := fs.NewIndexClaimsManager(fsOpts)
+ return defaultTestOptions.SetIndexClaimsManager(icm)
}
// numIntervals returns the number of intervals between [start, end] for a given
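
A hedged illustration of why `DefaultTestOptions` now builds a fresh index claims manager per call: a toy claims manager (the real keys and APIs in `fs.NewIndexClaimsManager` differ) showing how shared in-memory claims would leak volume indices across tests:

```go
package main

import (
	"fmt"
	"time"
)

// indexClaimsManager is a toy stand-in: it hands out monotonically increasing
// volume indices per (namespace, blockStart) claim, tracked only in memory.
type indexClaimsManager struct {
	nextVolume map[string]int
}

func newIndexClaimsManager() *indexClaimsManager {
	return &indexClaimsManager{nextVolume: make(map[string]int)}
}

func (m *indexClaimsManager) claim(ns string, blockStart time.Time) int {
	key := ns + "/" + blockStart.Format(time.RFC3339)
	v := m.nextVolume[key]
	m.nextVolume[key] = v + 1
	return v
}

func main() {
	blockStart := time.Now().Truncate(time.Hour)

	// Sharing one manager across "tests" leaks volume indices between them.
	shared := newIndexClaimsManager()
	fmt.Println(shared.claim("default", blockStart)) // 0
	fmt.Println(shared.claim("default", blockStart)) // 1: leaked state

	// A fresh manager per test starts from a clean slate, which is why
	// DefaultTestOptions constructs a new one on every call.
	fresh := newIndexClaimsManager()
	fmt.Println(fresh.claim("default", blockStart)) // 0 again
}
```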
diff --git a/src/dbnode/x/xio/types.go b/src/dbnode/x/xio/types.go
index 8fe7cf1680..fd02692fed 100644
--- a/src/dbnode/x/xio/types.go
+++ b/src/dbnode/x/xio/types.go
@@ -26,7 +26,7 @@ import (
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/pool"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// BlockReader represents a block reader backed by a
@@ -43,7 +43,7 @@ var EmptyBlockReader = BlockReader{}
// SegmentReader implements the io reader interface backed by a segment.
type SegmentReader interface {
io.Reader
- resource.Finalizer
+ xresource.Finalizer
// Segment gets the segment read by this reader.
Segment() (ts.Segment, error)
diff --git a/src/msg/integration/setup.go b/src/msg/integration/setup.go
index d797077021..0c377a2f46 100644
--- a/src/msg/integration/setup.go
+++ b/src/msg/integration/setup.go
@@ -536,7 +536,10 @@ func (c *testConsumer) consumeAndAck(totalConsumed *atomic.Int64) {
func testPlacementService(store kv.Store, sid services.ServiceID, isSharded bool) placement.Service {
opts := placement.NewOptions().SetShardStateMode(placement.StableShardStateOnly).SetIsSharded(isSharded)
- return service.NewPlacementService(storage.NewPlacementStorage(store, sid.String(), opts), opts)
+
+ return service.NewPlacementService(
+ storage.NewPlacementStorage(store, sid.String(), opts),
+ service.WithPlacementOptions(opts))
}
func testProducer(
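
The placement-service calls in this diff move from a positional options argument to functional options; below is a simplified stand-in showing that shape (the types here are illustrative, not the real `service` package):

```go
package main

import "fmt"

// placementOptions and placementService are simplified stand-ins.
type placementOptions struct{ isSharded bool }

type placementService struct {
	storage string
	opts    placementOptions
}

// Option mirrors the role of service.WithPlacementOptions.
type Option func(*placementService)

func WithPlacementOptions(opts placementOptions) Option {
	return func(ps *placementService) { ps.opts = opts }
}

// NewPlacementService applies options over defaults, so callers that don't
// care about options (like the updated testPlacementService in
// consumer_service_writer_test.go) can omit them entirely.
func NewPlacementService(storage string, options ...Option) *placementService {
	ps := &placementService{storage: storage, opts: placementOptions{isSharded: true}}
	for _, o := range options {
		o(ps)
	}
	return ps
}

func main() {
	ps := NewPlacementService("mem", WithPlacementOptions(placementOptions{isSharded: false}))
	fmt.Printf("sharded=%v\n", ps.opts.isSharded)
}
```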
diff --git a/src/msg/producer/writer/consumer_service_writer_test.go b/src/msg/producer/writer/consumer_service_writer_test.go
index f9d66e02a1..f88804e3e7 100644
--- a/src/msg/producer/writer/consumer_service_writer_test.go
+++ b/src/msg/producer/writer/consumer_service_writer_test.go
@@ -608,7 +608,8 @@ func TestConsumerServiceWriterUpdateNonShardedPlacementWithReplicatedConsumption
cs := topic.NewConsumerService().SetServiceID(sid).SetConsumptionType(topic.Replicated)
sd := services.NewMockServices(ctrl)
pOpts := placement.NewOptions().SetIsSharded(false)
- ps := service.NewPlacementService(storage.NewPlacementStorage(mem.NewStore(), sid.String(), pOpts), pOpts)
+ ps := service.NewPlacementService(storage.NewPlacementStorage(mem.NewStore(), sid.String(), pOpts),
+ service.WithPlacementOptions(pOpts))
sd.EXPECT().PlacementService(sid, gomock.Any()).Return(ps, nil)
_, err := ps.BuildInitialPlacement([]placement.Instance{
placement.NewInstance().SetID("i1").SetEndpoint("i1").SetWeight(1),
@@ -668,5 +669,7 @@ func TestConsumerServiceCloseShardWritersConcurrently(t *testing.T) {
}
func testPlacementService(store kv.Store, sid services.ServiceID) placement.Service {
- return service.NewPlacementService(storage.NewPlacementStorage(store, sid.String(), placement.NewOptions()), placement.NewOptions())
+ return service.NewPlacementService(
+ storage.NewPlacementStorage(store, sid.String(), placement.NewOptions()),
+ )
}
diff --git a/src/query/api/v1/handler/database/create.go b/src/query/api/v1/handler/database/create.go
index 76e433b661..de9e7054f8 100644
--- a/src/query/api/v1/handler/database/create.go
+++ b/src/query/api/v1/handler/database/create.go
@@ -573,7 +573,7 @@ func defaultedPlacementInitRequest(
return nil, errMissingEmbeddedDBConfig
}
- addr := embeddedDbCfg.ListenAddress
+ addr := embeddedDbCfg.ListenAddressOrDefault()
port, err := portFromEmbeddedDBConfigListenAddress(addr)
if err != nil {
return nil, err
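
`ListenAddressOrDefault` reflects a broader pattern in this change: optional config fields become pointers with defaulting accessors. A sketch under assumed names follows (the real default value lives in the dbnode config package):

```go
package main

import "fmt"

// defaultListenAddress is an assumed value for illustration only.
const defaultListenAddress = "0.0.0.0:9000"

// DBConfiguration sketches the pattern this diff adopts: optional config
// fields become pointers, and callers go through an OrDefault accessor
// instead of reading the field directly.
type DBConfiguration struct {
	ListenAddress *string `yaml:"listenAddress"`
}

func (c *DBConfiguration) ListenAddressOrDefault() string {
	if c.ListenAddress != nil {
		return *c.ListenAddress
	}
	return defaultListenAddress
}

func main() {
	var cfg DBConfiguration // listenAddress omitted from YAML
	fmt.Println(cfg.ListenAddressOrDefault())

	addr := "127.0.0.1:9100"
	cfg.ListenAddress = &addr
	fmt.Println(cfg.ListenAddressOrDefault())
}
```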
diff --git a/src/query/api/v1/handler/database/create_test.go b/src/query/api/v1/handler/database/create_test.go
index f72a4ff540..35f46b2f43 100644
--- a/src/query/api/v1/handler/database/create_test.go
+++ b/src/query/api/v1/handler/database/create_test.go
@@ -50,8 +50,9 @@ import (
)
var (
- testDBCfg = &dbconfig.DBConfiguration{
- ListenAddress: "0.0.0.0:9000",
+ listenAddress = "0.0.0.0:9000"
+ testDBCfg = &dbconfig.DBConfiguration{
+ ListenAddress: &listenAddress,
}
svcDefaultOptions = []handleroptions.ServiceOptionsDefault{
diff --git a/src/query/api/v1/handler/placement/get_test.go b/src/query/api/v1/handler/placement/get_test.go
index 40edc98375..fdae937481 100644
--- a/src/query/api/v1/handler/placement/get_test.go
+++ b/src/query/api/v1/handler/placement/get_test.go
@@ -70,7 +70,9 @@ func setupPlacementTest(t *testing.T, ctrl *gomock.Controller, initPlacement pla
mockClient.EXPECT().Services(gomock.Any()).Return(mockServices, nil).AnyTimes()
mockServices.EXPECT().PlacementService(gomock.Any(), gomock.Any()).DoAndReturn(
func(_ interface{}, opts placement.Options) (placement.Service, error) {
- ps := service.NewPlacementService(storage.NewPlacementStorage(mem.NewStore(), "", opts), opts)
+ ps := service.NewPlacementService(
+ storage.NewPlacementStorage(mem.NewStore(), "", opts),
+ service.WithPlacementOptions(opts))
if initPlacement != nil {
_, err := ps.Set(initPlacement)
require.NoError(t, err)
diff --git a/src/query/api/v1/handler/placement/set_test.go b/src/query/api/v1/handler/placement/set_test.go
index e2aed892e3..fcf05b4c2a 100644
--- a/src/query/api/v1/handler/placement/set_test.go
+++ b/src/query/api/v1/handler/placement/set_test.go
@@ -33,10 +33,10 @@ import (
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/generated/proto/admin"
"github.com/m3db/m3/src/x/instrument"
+ xtest "github.com/m3db/m3/src/x/test"
- "github.com/golang/mock/gomock"
"github.com/gogo/protobuf/jsonpb"
- xtest "github.com/m3db/m3/src/x/test"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/src/query/config/m3coordinator-cluster-template.yml b/src/query/config/m3coordinator-cluster-template.yml
index 79887d3388..bed8f8cae3 100644
--- a/src/query/config/m3coordinator-cluster-template.yml
+++ b/src/query/config/m3coordinator-cluster-template.yml
@@ -1,21 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
-tagOptions:
- idScheme: quoted
-
clusters:
## Fill-out the following and un-comment before using, and
## make sure indent by two spaces is applied.
diff --git a/src/query/config/m3coordinator-local-etcd.yml b/src/query/config/m3coordinator-local-etcd.yml
index 0dfefbd45f..1fb1940638 100644
--- a/src/query/config/m3coordinator-local-etcd.yml
+++ b/src/query/config/m3coordinator-local-etcd.yml
@@ -1,18 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- namespaces:
- namespace: default
@@ -29,8 +14,3 @@ clusters:
- zone: embedded
endpoints:
- 127.0.0.1:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
-
-tagOptions:
- idScheme: quoted
diff --git a/src/query/config/m3query-dev-etcd.yml b/src/query/config/m3query-dev-etcd.yml
index cdaf363ce9..90a62af0d9 100644
--- a/src/query/config/m3query-dev-etcd.yml
+++ b/src/query/config/m3query-dev-etcd.yml
@@ -1,21 +1,5 @@
# m3query configuration for local development setup. Mostly the same as m3query-local-etcd.yml, but using fewer
# resources (threads primarily).
-
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
clusters:
- namespaces:
- namespace: default
@@ -32,23 +16,6 @@ clusters:
- zone: embedded
endpoints:
- 127.0.0.1:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
- writeTimeout: 10s
- fetchTimeout: 15s
- connectTimeout: 20s
- writeRetry:
- initialBackoff: 500ms
- backoffFactor: 3
- maxRetries: 2
- jitter: true
- fetchRetry:
- initialBackoff: 500ms
- backoffFactor: 2
- maxRetries: 3
- jitter: true
- backgroundHealthCheckFailLimit: 4
- backgroundHealthCheckFailThrottleFactor: 0.5
readWorkerPoolPolicy:
grow: false
@@ -57,12 +24,3 @@ readWorkerPoolPolicy:
writeWorkerPoolPolicy:
grow: false
size: 10
-
-tagOptions:
- idScheme: quoted
-
-# Uncomment this to enable local jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
-# for quick local setup (which this config will send data to).
-
-# tracing:
-# backend: jaeger
diff --git a/src/query/config/m3query-local-etcd.yml b/src/query/config/m3query-local-etcd.yml
index f63b801a41..1fb1940638 100644
--- a/src/query/config/m3query-local-etcd.yml
+++ b/src/query/config/m3query-local-etcd.yml
@@ -1,21 +1,3 @@
-listenAddress: 0.0.0.0:7201
-
-logging:
- level: info
-
-metrics:
- scope:
- prefix: "coordinator"
- prometheus:
- handlerPath: /metrics
- listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved
- sanitization: prometheus
- samplingRate: 1.0
- extended: none
-
-tagOptions:
- idScheme: quoted
-
clusters:
- namespaces:
- namespace: default
@@ -32,26 +14,3 @@ clusters:
- zone: embedded
endpoints:
- 127.0.0.1:2379
- writeConsistencyLevel: majority
- readConsistencyLevel: unstrict_majority
- writeTimeout: 10s
- fetchTimeout: 15s
- connectTimeout: 20s
- writeRetry:
- initialBackoff: 500ms
- backoffFactor: 3
- maxRetries: 2
- jitter: true
- fetchRetry:
- initialBackoff: 500ms
- backoffFactor: 2
- maxRetries: 3
- jitter: true
- backgroundHealthCheckFailLimit: 4
- backgroundHealthCheckFailThrottleFactor: 0.5
-
-# Uncomment this to enable local jaeger tracing. See https://www.jaegertracing.io/docs/1.9/getting-started/
-# for quick local setup (which this config will send data to).
-
-# tracing:
-# backend: jaeger
diff --git a/src/query/graphite/native/builtin_functions.go b/src/query/graphite/native/builtin_functions.go
index 4528c07c1d..f757ddb528 100644
--- a/src/query/graphite/native/builtin_functions.go
+++ b/src/query/graphite/native/builtin_functions.go
@@ -1031,11 +1031,6 @@ func asPercent(ctx *common.Context, input singlePathSpec, total genericInterface
toNormalize = input.Values
tf = func(idx int, _ *ts.Series) float64 { return totalBySum(normalized, idx) }
} else {
- // check total is a single-series list and normalize all of them
- if total.Len() != 1 {
- err := errors.NewInvalidParamsError(errors.New("total must be a single series"))
- return ts.NewSeriesList(), err
- }
if len(nodes) > 0 {
// group the series by specified nodes and then sum those groups
groupedTotal, err := groupByNodes(ctx, input, "sum", nodes...)
diff --git a/src/query/server/multi_process.go b/src/query/server/multi_process.go
index 2ce47b5bb7..dda1c0818b 100644
--- a/src/query/server/multi_process.go
+++ b/src/query/server/multi_process.go
@@ -78,18 +78,19 @@ func multiProcessRun(
}
// Set the root scope multi-process process ID.
- if cfg.Metrics.RootScope == nil {
- cfg.Metrics.RootScope = &instrument.ScopeConfiguration{}
+ metrics := cfg.MetricsOrDefault()
+ if metrics.RootScope == nil {
+ metrics.RootScope = &instrument.ScopeConfiguration{}
}
- if cfg.Metrics.RootScope.CommonTags == nil {
- cfg.Metrics.RootScope.CommonTags = make(map[string]string)
+ if metrics.RootScope.CommonTags == nil {
+ metrics.RootScope.CommonTags = make(map[string]string)
}
- cfg.Metrics.RootScope.CommonTags[multiProcessMetricTagID] = multiProcessInstance
+ metrics.RootScope.CommonTags[multiProcessMetricTagID] = multiProcessInstance
// Listen on a different Prometheus metrics handler listen port.
- if cfg.Metrics.PrometheusReporter != nil && cfg.Metrics.PrometheusReporter.ListenAddress != "" {
+ if metrics.PrometheusReporter != nil && metrics.PrometheusReporter.ListenAddress != "" {
// Simply increment the listen address port by instance number
- host, port, err := net.SplitHostPort(cfg.Metrics.PrometheusReporter.ListenAddress)
+ host, port, err := net.SplitHostPort(metrics.PrometheusReporter.ListenAddress)
if err != nil {
return multiProcessResult{},
fmt.Errorf("could not split host:port for metrics reporter: %v", err)
@@ -103,7 +104,7 @@ func multiProcessRun(
if portValue > 0 {
// Increment port value by process ID if valid port.
address := net.JoinHostPort(host, strconv.Itoa(portValue+instance-1))
- cfg.Metrics.PrometheusReporter.ListenAddress = address
+ metrics.PrometheusReporter.ListenAddress = address
logger.Info("multi-process prometheus metrics reporter listen address configured",
zap.String("address", address))
}
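
The port-offset logic above can be isolated into a runnable sketch that mirrors it: instance 1 keeps the configured port and instance n listens on port+n-1:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// instanceListenAddress reproduces the per-instance offset logic from
// multiProcessRun. Invalid or non-positive ports leave the address unchanged,
// matching the guarded branch in the diff.
func instanceListenAddress(base string, instance int) (string, error) {
	host, port, err := net.SplitHostPort(base)
	if err != nil {
		return "", fmt.Errorf("could not split host:port for metrics reporter: %v", err)
	}
	portValue, err := strconv.Atoi(port)
	if err != nil || portValue <= 0 {
		return base, err
	}
	return net.JoinHostPort(host, strconv.Itoa(portValue+instance-1)), nil
}

func main() {
	for instance := 1; instance <= 3; instance++ {
		addr, _ := instanceListenAddress("0.0.0.0:7203", instance)
		fmt.Println(instance, addr) // 7203, 7204, 7205
	}
}
```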
diff --git a/src/query/server/query.go b/src/query/server/query.go
index 054e7055c1..e30dd1ded0 100644
--- a/src/query/server/query.go
+++ b/src/query/server/query.go
@@ -177,7 +177,7 @@ func Run(runOpts RunOptions) {
listenerOpts = xnet.NewListenerOptions()
)
- logger, err := cfg.Logging.BuildLogger()
+ logger, err := cfg.LoggingOrDefault().BuildLogger()
if err != nil {
// NB(r): Use fmt.Fprintf(os.Stderr, ...) to avoid etcd.SetGlobals()
// sending stdlib "log" to a black hole. Don't remove without good reason.
@@ -209,7 +209,7 @@ func Run(runOpts RunOptions) {
}
prometheusEngineRegistry := extprom.NewRegistry()
- scope, closer, reporters, err := cfg.Metrics.NewRootScopeAndReporters(
+ scope, closer, reporters, err := cfg.MetricsOrDefault().NewRootScopeAndReporters(
instrument.NewRootScopeAndReportersOptions{
PrometheusExternalRegistries: []instrument.PrometheusExternalRegistry{
{
@@ -436,7 +436,19 @@ func Run(runOpts RunOptions) {
var serviceOptionDefaults []handleroptions.ServiceOptionsDefault
if dbCfg := runOpts.DBConfig; dbCfg != nil {
- cluster, err := dbCfg.EnvironmentConfig.Services.SyncCluster()
+ hostID, err := dbCfg.HostID.Resolve()
+ if err != nil {
+ logger.Fatal("could not resolve hostID",
+ zap.Error(err))
+ }
+
+ envCfg, err := dbCfg.DiscoveryConfig.EnvironmentConfig(hostID)
+ if err != nil {
+ logger.Fatal("could not get env config from discovery config",
+ zap.Error(err))
+ }
+
+ cluster, err := envCfg.Services.SyncCluster()
if err != nil {
logger.Fatal("could not resolve embedded db cluster info",
zap.Error(err))
@@ -488,7 +500,8 @@ func Run(runOpts RunOptions) {
logger.Fatal("unable to register routes", zap.Error(err))
}
- srv := &http.Server{Addr: cfg.ListenAddress, Handler: handler.Router()}
+ listenAddress := cfg.ListenAddressOrDefault()
+ srv := &http.Server{Addr: listenAddress, Handler: handler.Router()}
defer func() {
logger.Info("closing server")
if err := srv.Shutdown(context.Background()); err != nil {
@@ -496,10 +509,10 @@ func Run(runOpts RunOptions) {
}
}()
- listener, err := listenerOpts.Listen("tcp", cfg.ListenAddress)
+ listener, err := listenerOpts.Listen("tcp", listenAddress)
if err != nil {
logger.Fatal("unable to listen on listen address",
- zap.String("address", cfg.ListenAddress),
+ zap.String("address", listenAddress),
zap.Error(err))
}
if runOpts.ListenerCh != nil {
@@ -509,7 +522,7 @@ func Run(runOpts RunOptions) {
logger.Info("starting API server", zap.Stringer("address", listener.Addr()))
if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
logger.Fatal("server serve error",
- zap.String("address", cfg.ListenAddress),
+ zap.String("address", listenAddress),
zap.Error(err))
}
}()
@@ -627,45 +640,40 @@ func newM3DBStorage(
namespaces = clusters.ClusterNamespaces()
downsampler downsample.Downsampler
)
- if n := namespaces.NumAggregatedClusterNamespaces(); n > 0 {
- logger.Info("configuring downsampler to use with aggregated cluster namespaces",
- zap.Int("numAggregatedClusterNamespaces", n))
+ logger.Info("configuring downsampler to use with aggregated cluster namespaces",
+ zap.Int("numAggregatedClusterNamespaces", len(namespaces)))
+
+ newDownsamplerFn := func() (downsample.Downsampler, error) {
+ ds, err := newDownsampler(
+ cfg.Downsample, clusterClient,
+ fanoutStorage, clusterNamespacesWatcher,
+ tsdbOpts.TagOptions(), instrumentOptions, rwOpts)
if err != nil {
- return nil, nil, nil, nil, err
+ return nil, err
}
- newDownsamplerFn := func() (downsample.Downsampler, error) {
- downsampler, err := newDownsampler(
- cfg.Downsample, clusterClient,
- fanoutStorage, clusterNamespacesWatcher,
- tsdbOpts.TagOptions(), instrumentOptions, rwOpts)
- if err != nil {
- return nil, err
- }
-
- // Notify the downsampler ready channel that
- // the downsampler has now been created and is ready.
- if downsamplerReadyCh != nil {
- downsamplerReadyCh <- struct{}{}
- }
-
- return downsampler, nil
+ // Notify the downsampler ready channel that
+ // the downsampler has now been created and is ready.
+ if downsamplerReadyCh != nil {
+ downsamplerReadyCh <- struct{}{}
}
- if clusterClientWaitCh != nil {
- // Need to wait before constructing and instead return an async downsampler
- // since the cluster client will return errors until it's initialized itself
- // and will fail constructing the downsampler consequently
- downsampler = downsample.NewAsyncDownsampler(func() (downsample.Downsampler, error) {
- <-clusterClientWaitCh
- return newDownsamplerFn()
- }, nil)
- } else {
- // Otherwise we already have a client and can immediately construct the downsampler
- downsampler, err = newDownsamplerFn()
- if err != nil {
- return nil, nil, nil, nil, err
- }
+ return ds, nil
+ }
+
+ if clusterClientWaitCh != nil {
+ // Need to wait before constructing and instead return an async downsampler,
+ // since the cluster client will return errors until it has initialized itself
+ // and constructing the downsampler would consequently fail.
+ downsampler = downsample.NewAsyncDownsampler(func() (downsample.Downsampler, error) {
+ <-clusterClientWaitCh
+ return newDownsamplerFn()
+ }, nil)
+ } else {
+ // Otherwise we already have a client and can immediately construct the downsampler
+ downsampler, err = newDownsamplerFn()
+ if err != nil {
+ return nil, nil, nil, nil, err
}
}
@@ -817,7 +825,20 @@ func initClusters(
if dbCfg == nil {
return nil, nil, nil, errors.New("environment config required when dynamically fetching namespaces")
}
- clusterStaticConfig.Client = client.Configuration{EnvironmentConfig: &dbCfg.EnvironmentConfig}
+
+ hostID, err := dbCfg.HostID.Resolve()
+ if err != nil {
+ logger.Fatal("could not resolve hostID",
+ zap.Error(err))
+ }
+
+ envCfg, err := dbCfg.DiscoveryConfig.EnvironmentConfig(hostID)
+ if err != nil {
+ logger.Fatal("could not get env config from discovery config",
+ zap.Error(err))
+ }
+
+ clusterStaticConfig.Client = client.Configuration{EnvironmentConfig: &envCfg}
}
clustersCfg := m3.ClustersStaticConfiguration{clusterStaticConfig}
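
The downsampler wiring above defers construction when a cluster client wait channel is present. Below is a simplified stand-in for that lazy-construction shape (`downsample.NewAsyncDownsampler`'s real signature has more moving parts):

```go
package main

import (
	"fmt"
	"sync"
)

// downsampler and asyncDownsampler are simplified stand-ins for the real
// downsample package types.
type downsampler interface{ Name() string }

type realDownsampler struct{}

func (realDownsampler) Name() string { return "downsampler" }

// asyncDownsampler defers construction until first use, running the supplied
// constructor exactly once.
type asyncDownsampler struct {
	once    sync.Once
	newFn   func() (downsampler, error)
	inner   downsampler
	initErr error
}

func (a *asyncDownsampler) get() (downsampler, error) {
	a.once.Do(func() { a.inner, a.initErr = a.newFn() })
	return a.inner, a.initErr
}

func main() {
	clusterClientWaitCh := make(chan struct{})
	ds := &asyncDownsampler{newFn: func() (downsampler, error) {
		// Block until the cluster client is initialized, since constructing
		// the downsampler before that point would fail.
		<-clusterClientWaitCh
		return realDownsampler{}, nil
	}}

	close(clusterClientWaitCh) // cluster client becomes ready
	d, err := ds.get()
	fmt.Println(d.Name(), err)
}
```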
diff --git a/src/query/storage/m3/cluster_namespaces_watcher.go b/src/query/storage/m3/cluster_namespaces_watcher.go
index 945dbd7166..a630d8f45c 100644
--- a/src/query/storage/m3/cluster_namespaces_watcher.go
+++ b/src/query/storage/m3/cluster_namespaces_watcher.go
@@ -21,7 +21,7 @@
package m3
import (
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
xwatch "github.com/m3db/m3/src/x/watch"
)
@@ -48,7 +48,9 @@ func (n *clusterNamespacesWatcher) Get() ClusterNamespaces {
return value.(ClusterNamespaces)
}
-func (n *clusterNamespacesWatcher) RegisterListener(listener ClusterNamespacesListener) xclose.SimpleCloser {
+func (n *clusterNamespacesWatcher) RegisterListener(
+ listener ClusterNamespacesListener,
+) xresource.SimpleCloser {
_, watch, _ := n.watchable.Watch()
namespaces := watch.Get()
diff --git a/src/query/storage/m3/types.go b/src/query/storage/m3/types.go
index 9413e92f19..99ef4f4410 100644
--- a/src/query/storage/m3/types.go
+++ b/src/query/storage/m3/types.go
@@ -27,8 +27,8 @@ import (
"github.com/m3db/m3/src/dbnode/namespace"
genericstorage "github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3/consolidators"
- xclose "github.com/m3db/m3/src/x/close"
"github.com/m3db/m3/src/x/instrument"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// Cleanup is a cleanup function to be called after resources are freed.
@@ -116,7 +116,7 @@ type ClusterNamespacesWatcher interface {
// RegisterListener registers a listener for updates to cluster namespaces.
// If a value is currently present, it will synchronously call back the listener.
- RegisterListener(listener ClusterNamespacesListener) xclose.SimpleCloser
+ RegisterListener(listener ClusterNamespacesListener) xresource.SimpleCloser
// Close closes the watcher and all descendent watches.
Close()
diff --git a/src/x/checked/checked_mock.go b/src/x/checked/checked_mock.go
index 2c5040ea5e..a546388273 100644
--- a/src/x/checked/checked_mock.go
+++ b/src/x/checked/checked_mock.go
@@ -144,10 +144,10 @@ func (mr *MockBytesMockRecorder) DecWrites() *gomock.Call {
}
// DelayFinalizer mocks base method
-func (m *MockBytes) DelayFinalizer() resource.Closer {
+func (m *MockBytes) DelayFinalizer() resource.SimpleCloser {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DelayFinalizer")
- ret0, _ := ret[0].(resource.Closer)
+ ret0, _ := ret[0].(resource.SimpleCloser)
return ret0
}
diff --git a/src/x/checked/ref.go b/src/x/checked/ref.go
index 569adbe9de..5257683a62 100644
--- a/src/x/checked/ref.go
+++ b/src/x/checked/ref.go
@@ -28,7 +28,7 @@ import (
"sync/atomic"
"unsafe"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// RefCount is an embeddable checked.Ref.
@@ -103,14 +103,14 @@ func (c *RefCount) finalizeWithLock() {
// until the closer returned by the method is called at least once.
// This is useful for dependent resources requiring the lifetime of this
// entity to be extended.
-func (c *RefCount) DelayFinalizer() resource.Closer {
+func (c *RefCount) DelayFinalizer() xresource.SimpleCloser {
c.finalizeState.Lock()
c.finalizeState.delayRef++
c.finalizeState.Unlock()
return c
}
-// Close implements resource.Closer for the purpose of use with DelayFinalizer.
+// Close implements xresource.SimpleCloser for the purpose of use with DelayFinalizer.
func (c *RefCount) Close() {
c.finalizeState.Lock()
c.finalizeState.delayRef--
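
A small model of the `DelayFinalizer` contract documented above, using a stand-in type that only tracks the delay counter (the real `checked.RefCount` also manages ref counts):

```go
package main

import "fmt"

// delayedFinalizable sketches the DelayFinalizer contract: finalization is
// deferred until every closer returned by DelayFinalizer has been closed.
type delayedFinalizable struct {
	delayRef  int
	finalized bool
}

func (d *delayedFinalizable) DelayFinalizer() *delayedFinalizable {
	d.delayRef++
	return d
}

// Close implements the SimpleCloser shape used by the real RefCount.
func (d *delayedFinalizable) Close() {
	d.delayRef--
	d.maybeFinalize()
}

func (d *delayedFinalizable) maybeFinalize() {
	if d.delayRef == 0 && !d.finalized {
		d.finalized = true
		fmt.Println("finalized")
	}
}

func main() {
	var d delayedFinalizable
	c1 := d.DelayFinalizer()
	c2 := d.DelayFinalizer()
	c1.Close() // still delayed: one outstanding closer
	c2.Close() // last closer released; finalizes now
}
```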
diff --git a/src/x/checked/ref_test.go b/src/x/checked/ref_test.go
index fe0a93827d..c17d751e95 100644
--- a/src/x/checked/ref_test.go
+++ b/src/x/checked/ref_test.go
@@ -30,13 +30,13 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/x/resource"
-
"github.com/leanovate/gopter"
"github.com/leanovate/gopter/gen"
"github.com/leanovate/gopter/prop"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ xresource "github.com/m3db/m3/src/x/resource"
)
func TestRefCountNegativeRefCount(t *testing.T) {
@@ -249,7 +249,7 @@ func TestRefCountDelayFinalizer(t *testing.T) {
elem.IncRef()
elem.DecRef()
- delays := make([]resource.Closer, 0, test.numDelay)
+ delays := make([]xresource.SimpleCloser, 0, test.numDelay)
for i := 0; i < test.numDelay; i++ {
delays = append(delays, elem.DelayFinalizer())
}
diff --git a/src/x/checked/types.go b/src/x/checked/types.go
index 0b0e2f26b7..269fdb91d5 100644
--- a/src/x/checked/types.go
+++ b/src/x/checked/types.go
@@ -22,7 +22,7 @@
package checked
import (
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
)
// Ref is an entity that checks ref counts.
@@ -43,7 +43,7 @@ type Ref interface {
// until the closer returned by the method is called at least once.
// This is useful for dependent resources requiring the lifetime of this
// entity to be extended.
- DelayFinalizer() resource.Closer
+ DelayFinalizer() xresource.SimpleCloser
// Finalize will call the finalizer if any, ref count must be zero.
Finalize()
diff --git a/src/x/context/context.go b/src/x/context/context.go
index 5d72e11670..c6617e79e8 100644
--- a/src/x/context/context.go
+++ b/src/x/context/context.go
@@ -25,7 +25,7 @@ import (
"sync"
xopentracing "github.com/m3db/m3/src/x/opentracing"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
lightstep "github.com/lightstep/lightstep-tracer-go"
"github.com/opentracing/opentracing-go"
@@ -52,8 +52,8 @@ type ctx struct {
}
type finalizeable struct {
- finalizer resource.Finalizer
- closer resource.Closer
+ finalizer xresource.Finalizer
+ closer xresource.SimpleCloser
}
// NewContext creates a new context.
@@ -96,7 +96,7 @@ func (c *ctx) IsClosed() bool {
return done
}
-func (c *ctx) RegisterFinalizer(f resource.Finalizer) {
+func (c *ctx) RegisterFinalizer(f xresource.Finalizer) {
parent := c.parentCtx()
if parent != nil {
parent.RegisterFinalizer(f)
@@ -106,7 +106,7 @@ func (c *ctx) RegisterFinalizer(f resource.Finalizer) {
c.registerFinalizeable(finalizeable{finalizer: f})
}
-func (c *ctx) RegisterCloser(f resource.Closer) {
+func (c *ctx) RegisterCloser(f xresource.SimpleCloser) {
parent := c.parentCtx()
if parent != nil {
parent.RegisterCloser(f)
diff --git a/src/x/context/context_test.go b/src/x/context/context_test.go
index f5a36c8c44..b2afe6abfe 100644
--- a/src/x/context/context_test.go
+++ b/src/x/context/context_test.go
@@ -27,12 +27,12 @@ import (
"testing"
"time"
- "github.com/m3db/m3/src/x/resource"
-
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ xresource "github.com/m3db/m3/src/x/resource"
)
func TestRegisterFinalizerWithChild(t *testing.T) {
@@ -48,7 +48,7 @@ func TestRegisterFinalizerWithChild(t *testing.T) {
)
wg.Add(1)
- childCtx.RegisterFinalizer(resource.FinalizerFn(func() {
+ childCtx.RegisterFinalizer(xresource.FinalizerFn(func() {
childClosed = true
wg.Done()
}))
@@ -71,7 +71,7 @@ func TestRegisterFinalizer(t *testing.T) {
)
wg.Add(1)
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
closed = true
wg.Done()
}))
@@ -97,7 +97,7 @@ func TestRegisterCloserWithChild(t *testing.T) {
)
wg.Add(1)
- childCtx.RegisterCloser(resource.CloserFn(func() {
+ childCtx.RegisterCloser(xresource.SimpleCloserFn(func() {
childClosed = true
wg.Done()
}))
@@ -120,7 +120,7 @@ func TestRegisterCloser(t *testing.T) {
)
wg.Add(1)
- ctx.RegisterCloser(resource.CloserFn(func() {
+ ctx.RegisterCloser(xresource.SimpleCloserFn(func() {
closed = true
wg.Done()
}))
@@ -136,7 +136,7 @@ func TestRegisterCloser(t *testing.T) {
func TestDoesNotRegisterFinalizerWhenClosed(t *testing.T) {
ctx := NewContext().(*ctx)
ctx.Close()
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {}))
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {}))
assert.Equal(t, 0, ctx.numFinalizeables())
}
@@ -145,7 +145,7 @@ func TestDoesNotCloseTwice(t *testing.T) {
ctx := NewContext().(*ctx)
var closed int32
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
atomic.AddInt32(&closed, 1)
}))
@@ -187,7 +187,7 @@ func testDependsOn(t *testing.T, c *ctx) {
other := NewContext().(*ctx)
wg.Add(1)
- c.RegisterFinalizer(resource.FinalizerFn(func() {
+ c.RegisterFinalizer(xresource.FinalizerFn(func() {
atomic.AddInt32(&closed, 1)
wg.Done()
}))
@@ -221,7 +221,7 @@ func TestDependsOnWithChild(t *testing.T) {
)
wg.Add(1)
- c.RegisterFinalizer(resource.FinalizerFn(func() {
+ c.RegisterFinalizer(xresource.FinalizerFn(func() {
atomic.AddInt32(&closed, 1)
wg.Done()
}))
diff --git a/src/x/context/pool_test.go b/src/x/context/pool_test.go
index 7757171243..a2106a4995 100644
--- a/src/x/context/pool_test.go
+++ b/src/x/context/pool_test.go
@@ -23,7 +23,7 @@ package context
import (
"testing"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/stretchr/testify/assert"
)
@@ -34,7 +34,7 @@ func TestContextPool(t *testing.T) {
ctx := pool.Get()
finalizeCalled := false
- ctx.RegisterFinalizer(resource.FinalizerFn(func() {
+ ctx.RegisterFinalizer(xresource.FinalizerFn(func() {
finalizeCalled = true
}))
ctx.BlockingClose()
diff --git a/src/x/context/types.go b/src/x/context/types.go
index bf152f05ca..d384dc3411 100644
--- a/src/x/context/types.go
+++ b/src/x/context/types.go
@@ -24,7 +24,7 @@ import (
stdctx "context"
"github.com/m3db/m3/src/x/pool"
- "github.com/m3db/m3/src/x/resource"
+ xresource "github.com/m3db/m3/src/x/resource"
"github.com/opentracing/opentracing-go"
)
@@ -47,10 +47,10 @@ type Context interface {
IsClosed() bool
// RegisterFinalizer will register a resource finalizer.
- RegisterFinalizer(resource.Finalizer)
+ RegisterFinalizer(xresource.Finalizer)
// RegisterCloser will register a resource closer.
- RegisterCloser(resource.Closer)
+ RegisterCloser(xresource.SimpleCloser)
// DependsOn will register a blocking context that
// must complete first before finalizers can be called.
diff --git a/src/x/instrument/methods.go b/src/x/instrument/methods.go
index f21a111957..fd61e976ab 100644
--- a/src/x/instrument/methods.go
+++ b/src/x/instrument/methods.go
@@ -251,7 +251,7 @@ func (t *sampledTimer) Start() tally.Stopwatch {
}
func (t *sampledTimer) Stop(startTime tally.Stopwatch) {
- if startTime == nullStopWatchStart { // nolint: badtime
+ if startTime == nullStopWatchStart {
// If startTime is nullStopWatchStart, do nothing.
return
}
diff --git a/src/x/close/close.go b/src/x/resource/close.go
similarity index 70%
rename from src/x/close/close.go
rename to src/x/resource/close.go
index 1b652005fa..668674127f 100644
--- a/src/x/close/close.go
+++ b/src/x/resource/close.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,12 +18,12 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// Package close provides utilities for closing resources.
-package close
+package resource
import (
"errors"
- "io"
+
+ xerrors "github.com/m3db/m3/src/x/errors"
)
var (
@@ -32,32 +32,6 @@ var (
ErrNotCloseable = errors.New("not a closeable resource")
)
-// Closer is a resource that can be closed.
-type Closer interface {
- io.Closer
-}
-
-// CloserFn implements the SimpleCloser interface.
-type CloserFn func() error
-
-// Close implements the SimplerCloser interface.
-func (fn CloserFn) Close() error {
- return fn()
-}
-
-// SimpleCloser is a resource that can be closed without returning a result.
-type SimpleCloser interface {
- Close()
-}
-
-// SimpleCloserFn implements the SimpleCloser interface.
-type SimpleCloserFn func()
-
-// Close implements the SimplerCloser interface.
-func (fn SimpleCloserFn) Close() {
- fn()
-}
-
// TryClose attempts to close a resource; the resource is expected to
// implement either Closeable or CloseableResult.
func TryClose(r interface{}) error {
@@ -70,3 +44,15 @@ func TryClose(r interface{}) error {
}
return ErrNotCloseable
}
+
+// CloseAll closes all closers and combines any errors.
+func CloseAll(closers ...Closer) error {
+ multiErr := xerrors.NewMultiError()
+ for _, closer := range closers {
+ if err := closer.Close(); err != nil {
+ multiErr = multiErr.Add(err)
+ }
+ }
+
+ return multiErr.FinalError()
+}
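
A usage sketch of the new `CloseAll` semantics: every closer runs even when earlier ones fail, and the errors are combined. This self-contained version substitutes `errors.Join` for the real `xerrors.MultiError`:

```go
package main

import (
	"errors"
	"fmt"
)

// Closer and CloserFn match the error-returning closer shape that CloseAll
// operates on in src/x/resource.
type Closer interface{ Close() error }

type CloserFn func() error

func (fn CloserFn) Close() error { return fn() }

// closeAll mirrors the new xresource.CloseAll: every closer runs even if an
// earlier one fails, and the errors are combined.
func closeAll(closers ...Closer) error {
	var errs []error
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

func main() {
	err := closeAll(
		CloserFn(func() error { return nil }),
		CloserFn(func() error { return errors.New("flush failed") }),
		CloserFn(func() error { return errors.New("socket close failed") }),
	)
	fmt.Println(err)
}
```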
diff --git a/src/x/close/close_test.go b/src/x/resource/close_test.go
similarity index 92%
rename from src/x/close/close_test.go
rename to src/x/resource/close_test.go
index 926df11e76..a8ab96103b 100644
--- a/src/x/close/close_test.go
+++ b/src/x/resource/close_test.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
+// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -17,14 +17,13 @@
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-
-package close
+package resource
import (
"errors"
"testing"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestTryClose(t *testing.T) {
@@ -53,10 +52,10 @@ func TestTryClose(t *testing.T) {
for _, test := range tests {
err := TryClose(test.input)
if test.expectErr {
- assert.Error(t, err)
+ require.Error(t, err)
continue
}
- assert.NoError(t, err)
+ require.NoError(t, err)
}
}
diff --git a/src/x/resource/types.go b/src/x/resource/types.go
index 05933c593d..42b2b0d02c 100644
--- a/src/x/resource/types.go
+++ b/src/x/resource/types.go
@@ -38,15 +38,28 @@ func (fn FinalizerFn) Finalize() {
fn()
}
-// Closer is an object that can be closed.
-type Closer interface {
+// SimpleCloser is an object that can be closed.
+type SimpleCloser interface {
Close()
}
-// CloserFn is a function literal that is a closer.
-type CloserFn func()
+// SimpleCloserFn is a function literal that is a closer.
+type SimpleCloserFn func()
// Close will call the function literal as a closer.
-func (fn CloserFn) Close() {
+func (fn SimpleCloserFn) Close() {
fn()
}
+
+// Closer is an object that can be closed which returns an error.
+type Closer interface {
+ Close() error
+}
+
+// CloserFn is a function literal that is a closer which returns an error.
+type CloserFn func() error
+
+// Close will call the function literal as a closer.
+func (fn CloserFn) Close() error {
+ return fn()
+}
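
To make the rename concrete: the two closer shapes side by side, plus a nil-returning adapter, mirroring the interfaces above (stand-alone copies for illustration, not the real package):

```go
package main

import "fmt"

// The renamed pair from src/x/resource: SimpleCloser closes without a result,
// Closer reports an error. Both function adapters mirror the diff.
type SimpleCloser interface{ Close() }

type SimpleCloserFn func()

func (fn SimpleCloserFn) Close() { fn() }

type Closer interface{ Close() error }

type CloserFn func() error

func (fn CloserFn) Close() error { return fn() }

func main() {
	var sc SimpleCloser = SimpleCloserFn(func() { fmt.Println("watch closed") })
	var c Closer = CloserFn(func() error { fmt.Println("file closed"); return nil })

	sc.Close()
	_ = c.Close()

	// A SimpleCloser can be adapted to a Closer by returning nil, which is
	// handy when wiring watch closers into error-aware teardown paths.
	adapted := CloserFn(func() error { sc.Close(); return nil })
	_ = adapted.Close()
}
```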
diff --git a/src/x/watch/source.go b/src/x/watch/source.go
index 4d174e1cb2..aba93b0087 100644
--- a/src/x/watch/source.go
+++ b/src/x/watch/source.go
@@ -24,7 +24,7 @@ import (
"errors"
"sync"
- "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
"go.uber.org/zap"
)
@@ -41,7 +41,7 @@ type SourceInput interface {
// Source polls data by calling SourcePollFn and notifies its watches on updates.
type Source interface {
- close.SimpleCloser
+ xresource.SimpleCloser
// Get returns the latest value.
Get() interface{}
diff --git a/src/x/watch/watch.go b/src/x/watch/watch.go
index 42e531fc77..5b35b17578 100644
--- a/src/x/watch/watch.go
+++ b/src/x/watch/watch.go
@@ -25,7 +25,7 @@ import (
"errors"
"sync"
- xclose "github.com/m3db/m3/src/x/close"
+ xresource "github.com/m3db/m3/src/x/resource"
)
var errClosed = errors.New("closed")
@@ -34,7 +34,7 @@ type closer func()
// Updatable can be updated.
type Updatable interface {
- xclose.SimpleCloser
+ xresource.SimpleCloser
// C returns the notification channel for updates.
C() <-chan struct{}
@@ -50,7 +50,7 @@ type Watch interface {
// Watchable can be watched
type Watchable interface {
- xclose.SimpleCloser
+ xresource.SimpleCloser
// IsClosed returns true if the Watchable is closed
IsClosed() bool
diff --git a/tools.go b/tools.go
index 4cc4ead356..f7f7b2b9d2 100644
--- a/tools.go
+++ b/tools.go
@@ -7,8 +7,6 @@ import (
_ "github.com/garethr/kubeval"
_ "github.com/golang/mock/mockgen"
_ "github.com/google/go-jsonnet/cmd/jsonnet"
- _ "github.com/m3db/build-tools/linters/badtime"
- _ "github.com/m3db/build-tools/linters/importorder"
_ "github.com/m3db/build-tools/utilities/genclean"
_ "github.com/m3db/tools/update-license"
_ "github.com/mauricelam/genny"