diff --git a/site/config.toml b/site/config.toml index fac70123cf..88999c2e34 100644 --- a/site/config.toml +++ b/site/config.toml @@ -1,12 +1,12 @@ # Change the default theme to be use when building the site with Hugo [module] [[module.imports]] - path = "github.com/chronosphereio/docs-theme" - # path = "../../../docs-theme" + path = "github.com/chronosphereio/victor" + # path = "../../../victor" [[module.mounts]] source = "content" - target = "content" + target = "content/docs" [[module.mounts]] source = "static" @@ -32,8 +32,23 @@ source = "archetypes" target = "archetypes" -# theme = "docs-theme" +ignoreFiles = [ "\\.ttf$", "\\.woff$", "\\.woff2$", "\\.eot$" ] + +[permalinks] + "/" = "/docs/:section/:title/" + "faqs" = "/docs/:section/:title/" + "glossaries" = "/docs/:section/:title/" + "how_tos" = "/docs/:section/:title/" + "integrations" = "/docs/:section/:title/" + "m3coordinators" = "/docs/:section/:title/" + "m3dbs" = "/docs/:section/:title/" + "m3querys" = "/docs/:section/:title/" + "operational_guides" = "/docs/:section/:title/" + "overviews" = "/docs/:section/:title/" + "quickstarts" = "/docs/:section/:title/" + "troubleshootings" = "/docs/:section/:title/" +# theme = "docs-theme" # baseURL = "/" languageCode = "en-US" defaultContentLanguage = "en" @@ -89,7 +104,8 @@ offlineSearch = false # Useful to give opportunity to people to create merge request for your doc. # See the config.toml file from this documentation site to have an example. # TODO: pattern to branch? - editURL = "https://github.com/m3db/m3/tree/master/site/content/" + # TODO: bring back + # editURL = "https://github.com/m3db/m3/tree/master/site/content/" # Author of the site, will be used in meta information author = "m3" # Description of the site, will be used in meta information @@ -116,6 +132,7 @@ offlineSearch = false # Change default color scheme with a variant one. Can be "red", "blue", "green". 
themeVariant = "blue" twitter = "m3db_io" + disableHomeIcon = true [params.api] localCordinator = "http://localhost:7201/" @@ -155,12 +172,23 @@ offlineSearch = false taskList = true typographer = true -[[Languages.en.menu.shortcuts]] -name = " GitHub" +[[menu.shortcuts]] +pre = "

More

" +name = " " identifier = "ds" url = "https://github.com/m3db/m3" weight = 10 +[[menu.shortcuts]] +name = " " +url = "https://bit.ly/m3slack" +weight = 11 + +[[menu.shortcuts]] +name = " " +url = "https://groups.google.com/forum/#!forum/m3db" +weight = 12 + [outputs] home = [ "HTML", "RSS", "JSON"] page = [ "HTML"] diff --git a/site/content/_index.md b/site/content/_index.md new file mode 100644 index 0000000000..7272ba3512 --- /dev/null +++ b/site/content/_index.md @@ -0,0 +1,42 @@ +--- +title: M3 Documentation +weight: 1 +--- + + +## About + +After using open-source metrics solutions and finding issues with them at scale – such as reliability, cost, and +operational complexity – [M3](https://github.com/m3db/m3) was created from the ground up to provide Uber with a +native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and +other supporting infrastructure. + +## Key Features + +M3 has several features, provided as discrete components, which make it an ideal platform for time series data at scale: + +- A distributed time series database, [M3DB](/docs/m3db/), that provides scalable storage for time series data and a reverse index. +- A sidecar process, [M3Coordinator](/docs/integrations/prometheus), that allows M3DB to act as the long-term storage for Prometheus. +- A distributed query engine, [M3Query](/docs/m3query), with native support for PromQL and Graphite (M3QL coming soon). + +- An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions. + +## Getting Started + +**Note:** Make sure to read our [Operational Guides](/docs/operational_guide) before running in production! + +Getting started with M3 is as easy as following one of the How-To guides. 
+ +- [Single M3DB node deployment](/docs/quickstart) +- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) +- [M3DB on Kubernetes](/docs/operator) +- [Isolated M3Query on deployment](/docs/how_to/query) + +## Support + +For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be +reached in a variety of ways: + +- [Slack (main chat channel)](http://bit.ly/m3slack) +- [Email](https://groups.google.com/forum/#!forum/m3db) +- [Github issues](https://github.com/m3db/m3/issues) diff --git a/site/content/docs.md b/site/content/docs.md new file mode 100644 index 0000000000..4446d83bf7 --- /dev/null +++ b/site/content/docs.md @@ -0,0 +1,43 @@ +--- +title: M3 Introduction +weight: 1 +permalink: /docs/ +--- + + +## About + +After using open-source metrics solutions and finding issues with them at scale – such as reliability, cost, and +operational complexity – [M3](https://github.com/m3db/m3) was created from the ground up to provide Uber with a +native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and +other supporting infrastructure. + +## Key Features + +M3 has several features, provided as discrete components, which make it an ideal platform for time series data at scale: + +- A distributed time series database, [M3DB](/docs/m3db/), that provides scalable storage for time series data and a reverse index. +- A sidecar process, [M3Coordinator](/docs/integrations/prometheus), that allows M3DB to act as the long-term storage for Prometheus. +- A distributed query engine, [M3Query](/docs/m3query), with native support for PromQL and Graphite (M3QL coming soon). + +- An aggregation tier, M3Aggregator, that runs as a dedicated metrics aggregator/downsampler allowing metrics to be stored at various retentions at different resolutions. + +## Getting Started + +**Note:** Make sure to read our [Operational Guides](/docs/operational_guide) before running in production! 
+ +Getting started with M3 is as easy as following one of the How-To guides. + +- [Single M3DB node deployment](/docs/quickstart) +- [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) +- [M3DB on Kubernetes](/docs/operator) +- [Isolated M3Query on deployment](/docs/how_to/query) + +## Support + +For support with any issues, questions about M3 or its operation, or to leave any comments, the team can be +reached in a variety of ways: + +- [Slack (main chat channel)](http://bit.ly/m3slack) +- [Email](https://groups.google.com/forum/#!forum/m3db) +- [Github issues](https://github.com/m3db/m3/issues) diff --git a/site/content/docs/case_studies/index.md b/site/content/docs/case_studies/index.md deleted file mode 100644 index 3e53c4fd7b..0000000000 --- a/site/content/docs/case_studies/index.md +++ /dev/null @@ -1 +0,0 @@ -# Case Studies diff --git a/site/content/docs/community/index.md b/site/content/docs/community/index.md deleted file mode 100644 index 4c0d01ba29..0000000000 --- a/site/content/docs/community/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# Community - -## How to contact the M3 team? - -Feel free to contact us through any of the following channels: - -1. Posting on the [M3 Google group](https://groups.google.com/forum/#!forum/m3db) -2. Opening issues on the [M3 GitHub page](https://github.com/m3db/m3/issues) -3. Chatting us on the official [Slack](http://bit.ly/m3slack) - -## GitHub/OSS - -Our official GitHub page can be [found here](https://github.com/m3db/m3). 
diff --git a/site/content/docs/ecosystem/index.md b/site/content/docs/ecosystem/index.md deleted file mode 100644 index e46e912ed8..0000000000 --- a/site/content/docs/ecosystem/index.md +++ /dev/null @@ -1 +0,0 @@ -# Ecosystem diff --git a/site/content/docs/glossary/bootstrapping.md b/site/content/docs/glossary/bootstrapping.md deleted file mode 100644 index ff28f8fb3c..0000000000 --- a/site/content/docs/glossary/bootstrapping.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Bootstrapping -id: bootstrapping -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Process by which an M3DB node is brought up. Bootstrapping consists of determining the integrity of data that the node has, replay writes from the commit log, and/or stream missing data from its peers. \ No newline at end of file diff --git a/site/content/docs/glossary/cardinality.md b/site/content/docs/glossary/cardinality.md deleted file mode 100644 index b19d87d362..0000000000 --- a/site/content/docs/glossary/cardinality.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Cardinality -id: cardinality -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -The number of unique metrics within the M3DB index. Cardinality increases with the number of unique tag/value combinations that are being emitted. \ No newline at end of file diff --git a/site/content/docs/glossary/datapoint.md b/site/content/docs/glossary/datapoint.md deleted file mode 100644 index bd6db25015..0000000000 --- a/site/content/docs/glossary/datapoint.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Datapoint -id: datapoint -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A single timestamp/value. 
Timeseries are composed of multiple datapoints and a series of tag/value pairs \ No newline at end of file diff --git a/site/content/docs/glossary/index.md b/site/content/docs/glossary/index.md deleted file mode 100644 index 225afb5e2d..0000000000 --- a/site/content/docs/glossary/index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Standardized Glossary -layout: glossary -noedit: true -default_active_tag: test -weight: 5 -card: - name: reference - weight: 10 - title: Glossary ---- - diff --git a/site/content/docs/glossary/labels.md b/site/content/docs/glossary/labels.md deleted file mode 100644 index 3ce06367d8..0000000000 --- a/site/content/docs/glossary/labels.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Labels -id: labels -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Pairs of descriptive words that give meaning to a metric. Tags and Labels are interchangeable terms. - diff --git a/site/content/docs/glossary/m3.md b/site/content/docs/glossary/m3.md deleted file mode 100644 index 56a8077411..0000000000 --- a/site/content/docs/glossary/m3.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: M3 -id: m3 -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Highly scalable, distributed metrics platform that is comprised of a native, distributed time series database, a highly-dynamic and performant aggregation service, a query engine, and other supporting infrastructure. - diff --git a/site/content/docs/glossary/m3coordinator.md b/site/content/docs/glossary/m3coordinator.md deleted file mode 100644 index b6f0e21394..0000000000 --- a/site/content/docs/glossary/m3coordinator.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: M3Coordinator -id: m3coordinator -date: 2019-06-15 -full_link: -short_description: > - I am a test term. 
- -aka: -tags: -- example -- illustration ---- - -A service within M3 that coordinates reads and writes between upstream systems, such as Prometheus, and downstream systems, such as M3DB. \ No newline at end of file diff --git a/site/content/docs/glossary/m3db.md b/site/content/docs/glossary/m3db.md deleted file mode 100644 index 6b7d90df94..0000000000 --- a/site/content/docs/glossary/m3db.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: M3DB -id: m3db -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Distributed time series database influenced by Gorilla and Cassandra released as open source by Uber Technologies. - diff --git a/site/content/docs/glossary/m3query.md b/site/content/docs/glossary/m3query.md deleted file mode 100644 index 094fcdf1fb..0000000000 --- a/site/content/docs/glossary/m3query.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: M3Query -id: m3query -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A distributed query engine for M3DB. Unlike M3Coordinator, M3Query only provides supports for reads. \ No newline at end of file diff --git a/site/content/docs/glossary/metric.md b/site/content/docs/glossary/metric.md deleted file mode 100644 index da6eb1f0f9..0000000000 --- a/site/content/docs/glossary/metric.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Metric -id: metric -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A collection of uniquely identifiable tags. 
\ No newline at end of file diff --git a/site/content/docs/glossary/namespace.md b/site/content/docs/glossary/namespace.md deleted file mode 100644 index 83ad0941e4..0000000000 --- a/site/content/docs/glossary/namespace.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Namespace -id: namespace -date: 2019-06-15 -full_link: -short_description: Configuration for a set of data -aka: -tags: -- table -- configuration ---- - -Similar to a table in other types of databases, namespaces in M3DB have a unique name and a set of configuration options, such as data retention and block size. \ No newline at end of file diff --git a/site/content/docs/glossary/placement.md b/site/content/docs/glossary/placement.md deleted file mode 100644 index da138d379d..0000000000 --- a/site/content/docs/glossary/placement.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Placement -id: placement -date: 2019-06-15 -full_link: -short_description: Map of cluster shard replicas to nodes -aka: -- topology -tags: -- topology -- shards ---- - -Map of the M3DB cluster's shard replicas to nodes. Each M3DB cluster has only one placement. Placement and Topology are interchangeable terms. \ No newline at end of file diff --git a/site/content/docs/glossary/shard.md b/site/content/docs/glossary/shard.md deleted file mode 100644 index 0e7e016122..0000000000 --- a/site/content/docs/glossary/shard.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Shard -id: shard -date: 2019-06-15 -full_link: -short_description: Distribution of time series data -aka: -tags: -- example -- illustration ---- - -Effectively the same as a "virtual shard" in Cassandra in that it provides an arbitrary distribution of time series data via a simple hash of the series ID. 
\ No newline at end of file diff --git a/site/content/docs/glossary/tags.md b/site/content/docs/glossary/tags.md deleted file mode 100644 index 7e69e8609a..0000000000 --- a/site/content/docs/glossary/tags.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Tags -id: tags -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Pairs of descriptive words that give meaning to a metric. Tags and Labels are interchangeable terms. \ No newline at end of file diff --git a/site/content/docs/glossary/timeseries.md b/site/content/docs/glossary/timeseries.md deleted file mode 100644 index 63a7b24a23..0000000000 --- a/site/content/docs/glossary/timeseries.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Timeseries -id: timeseries -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -A series of data points tracking a particular metric over time. \ No newline at end of file diff --git a/site/content/docs/glossary/topology.md b/site/content/docs/glossary/topology.md deleted file mode 100644 index 0f3770d2f8..0000000000 --- a/site/content/docs/glossary/topology.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Topology -id: topology -date: 2019-06-15 -full_link: -short_description: > - I am a test term. - -aka: -tags: -- example -- illustration ---- - -Map of the M3DB cluster's shard replicas to nodes. Each M3DB cluster has only one placement. Placement and Topology are interchangeable terms. - diff --git a/site/content/docs/how_to/kubernetes.md b/site/content/docs/how_to/kubernetes.md deleted file mode 100644 index 92fbb6d74f..0000000000 --- a/site/content/docs/how_to/kubernetes.md +++ /dev/null @@ -1,326 +0,0 @@ ---- -title: M3DB on Kubernetes -weight: 3 ---- - - -**Please note:** If possible _[PLEASE USE THE OPERATOR](https://operator.m3db.io/)_ to deploy to Kubernetes if you -can. It is a considerably more streamlined setup. 
- -The [operator](https://operator.m3db.io/) leverages [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -(CRDs) to automatically handle operations such as managing cluster topology. - -The guide below provides static manifests to bootstrap a cluster on Kubernetes and should be considered -as a guide to running M3 on Kubernetes, if and only if you have significant custom requirements not satisfied by -the operator. - -## Prerequisites - -M3DB performs better when it has access to fast disks. Every incoming write is written to a commit log, which at high -volumes of writes can be sensitive to spikes in disk latency. Additionally the random seeks into files when loading cold -files benefit from lower random read latency. - -Because of this, the included manifests reference a -[StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) named `fast`. Manifests are -provided to provide such a StorageClass on AWS / Azure / GCP using the respective cloud provider's premium disk class. - -If you do not already have a StorageClass named `fast`, create one using one of the provided manifests: - -```shell -# AWS EBS (class io1) -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-aws.yaml - -# Azure premium LRS -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-azure.yaml - -# GCE Persistent SSD -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/storage-fast-gcp.yaml -``` - -If you wish to use your cloud provider's default remote disk, or another disk class entirely, you'll have to modify them -manifests. 
- -If your Kubernetes cluster spans multiple availability zones, it's important to specify a [Volume Binding Mode](https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) of `WaitForFirstConsumer` in your StorageClass to delay the binding of the PersistentVolume until the Pod is created. - -### Kernel Configuration - -We provide a Kubernetes daemonset that can make setting host-level sysctls easier. Please see the [kernel][/docs/operational_guide/kernel_configuration] docs -for more. - -Note that our default StatefulSet spec will give the M3DB container `CAP_SYS_RESOURCE` so it may raise its file limits. -Uncomment the `securityContext` on the `m3db` container in the StatefulSet if running with a Pod Security Policy or -similar enforcement mechanism that prevents adding capabilities to containers. - -## Deploying - -Apply the following manifest to create your cluster: - -```shell -kubectl apply -f https://raw.githubusercontent.com/m3db/m3/master/kube/bundle.yaml -``` - -Applying this bundle will create the following resources: - -1. An `m3db` [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) for - all M3DB-related resources. -2. A 3-node etcd cluster in the form of a - [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) backed by persistent - remote SSDs. This cluster stores the DB topology and other runtime configuration data. -3. A 3-node M3DB cluster in the form of a StatefulSet. -4. [Headless services](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services) for - the etcd and m3db StatefulSets to provide stable DNS hostnames per-pod. 
- -Wait until all created pods are listed as ready: - -```shell -$ kubectl -n m3db get po -NAME READY STATUS RESTARTS AGE -etcd-0 1/1 Running 0 22m -etcd-1 1/1 Running 0 22m -etcd-2 1/1 Running 0 22m -m3dbnode-0 1/1 Running 0 22m -m3dbnode-1 1/1 Running 0 22m -m3dbnode-2 1/1 Running 0 22m -``` - -You can now proceed to initialize a namespace and placement for the cluster the same as you would for our other how-to -guides: - -```shell -# Open a local connection to the coordinator service: -$ kubectl -n m3db port-forward svc/m3coordinator 7201 -Forwarding from 127.0.0.1:7201 -> 7201 -Forwarding from [::1]:7201 -> 7201 -``` - -```shell -# Create an initial cluster topology -curl -sSf -X POST localhost:7201/api/v1/services/m3db/placement/init -d '{ - "num_shards": 1024, - "replication_factor": 3, - "instances": [ - { - "id": "m3dbnode-0", - "isolation_group": "pod0", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-0.m3dbnode:9000", - "hostname": "m3dbnode-0.m3dbnode", - "port": 9000 - }, - { - "id": "m3dbnode-1", - "isolation_group": "pod1", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-1.m3dbnode:9000", - "hostname": "m3dbnode-1.m3dbnode", - "port": 9000 - }, - { - "id": "m3dbnode-2", - "isolation_group": "pod2", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-2.m3dbnode:9000", - "hostname": "m3dbnode-2.m3dbnode", - "port": 9000 - } - ] -}' -``` - -```shell -# Create a namespace to hold your metrics -curl -X POST localhost:7201/api/v1/services/m3db/namespace -d '{ - "name": "default", - "options": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "snapshotEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodDuration": "720h", - "blockSizeDuration": "12h", - "bufferFutureDuration": "1h", - "bufferPastDuration": "1h", - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodDuration": "5m" - }, - "indexOptions": { - 
"enabled": true, - "blockSizeDuration": "12h" - } - } -}' -``` - -Shortly after you should see your nodes finish bootstrapping: - -```shell -$ kubectl -n m3db logs -f m3dbnode-0 -21:36:54.831698[I] cluster database initializing topology -21:36:54.831732[I] cluster database resolving topology -21:37:22.821740[I] resolving namespaces with namespace watch -21:37:22.821813[I] updating database namespaces [{adds [metrics]} {updates []} {removals []}] -21:37:23.008109[I] node tchannelthrift: listening on 0.0.0.0:9000 -21:37:23.008384[I] cluster tchannelthrift: listening on 0.0.0.0:9001 -21:37:23.217090[I] node httpjson: listening on 0.0.0.0:9002 -21:37:23.217240[I] cluster httpjson: listening on 0.0.0.0:9003 -21:37:23.217526[I] bootstrapping shards for range starting [{run bootstrap-data} {bootstrapper filesystem} ... -... -21:37:23.239534[I] bootstrap data fetched now initializing shards with series blocks [{namespace metrics} {numShards 256} {numSeries 0}] -21:37:23.240778[I] bootstrap finished [{namespace metrics} {duration 23.325194ms}] -21:37:23.240856[I] bootstrapped -21:37:29.733025[I] successfully updated topology to 3 hosts -``` - -You can now write and read metrics using the API on the DB nodes: - -```shell -$ kubectl -n m3db port-forward svc/m3dbnode 9003 -Forwarding from 127.0.0.1:9003 -> 9003 -Forwarding from [::1]:9003 -> 9003 -``` - -```shell -curl -sSf -X POST localhost:9003/writetagged -d '{ - "namespace": "default", - "id": "foo", - "tags": [ - { - "name": "city", - "value": "new_york" - }, - { - "name": "endpoint", - "value": "/request" - } - ], - "datapoint": { - "timestamp": '"$(date "+%s")"', - "value": 42.123456789 - } -}' -``` - -```shell -$ curl -sSf -X POST http://localhost:9003/query -d '{ - "namespace": "default", - "query": { - "regexp": { - "field": "city", - "regexp": ".*" - } - }, - "rangeStart": 0, - "rangeEnd": '"$(date "+%s")"' -}' | jq . 
- -{ - "results": [ - { - "id": "foo", - "tags": [ - { - "name": "city", - "value": "new_york" - }, - { - "name": "endpoint", - "value": "/request" - } - ], - "datapoints": [ - { - "timestamp": 1527630053, - "value": 42.123456789 - } - ] - } - ], - "exhaustive": true -} -``` - -To read and write metrics via the Coordinator (and not directly through the DB node API), you must mark the namespace as ready: - -```shell -curl -X POST http://localhost:7201/api/v1/services/m3db/namespace/ready -d '{ - "name": "default" -} -``` - -You should now be able to use both the Coordinator and DB node API to perform reads and writes. - -### Adding nodes - -You can easily scale your M3DB cluster by scaling the StatefulSet and informing the cluster topology of the change: - -```shell -kubectl -n m3db scale --replicas=4 statefulset/m3dbnode -``` - -Once the pod is ready you can modify the cluster topology: - -```shell -kubectl -n m3db port-forward svc/m3coordinator 7201 -Forwarding from 127.0.0.1:7201 -> 7201 -Forwarding from [::1]:7201 -> 7201 -``` - -```shell -curl -sSf -X POST localhost:7201/api/v1/services/m3db/placement -d '{ - "instances": [ - { - "id": "m3dbnode-3", - "isolation_group": "pod3", - "zone": "embedded", - "weight": 100, - "endpoint": "m3dbnode-3.m3dbnode:9000", - "hostname": "m3dbnode-3.m3dbnode", - "port": 9000 - } - ] -}' -``` - -## Integrations - -### Prometheus - -As mentioned in our integrations [guide](/docs/integrations/prometheus), M3DB can be used as a [remote read/write -endpoint](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_write%3E) for Prometheus. 
- -If you run Prometheus on your Kubernetes cluster you can easily point it at M3DB in your Prometheus server config: - -```YAML -remote_read: - - url: "http://m3coordinator.m3db.svc.cluster.local:7201/api/v1/prom/remote/read" - # To test reading even when local Prometheus has the data - read_recent: true - -remote_write: - - url: "http://m3coordinator.m3db.svc.cluster.local:7201/api/v1/prom/remote/write" - # To differentiate between local and remote storage we will add a storage label - write_relabel_configs: - - target_label: metrics_storage - replacement: m3db_remote -``` - -## Scheduling - -In some cases, you might prefer M3DB to run on certain nodes in your cluster. For example: if your cluster is comprised -of different instance types and some have more memory than others then you'd like M3DB to run on those nodes if -possible. To accommodate this, the pods created by the StatefulSets use [pod -affinities](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) and -[tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to prefer to run on -certain nodes. Specifically: - -1. The pods tolerate the taint `"dedicated-m3db"` to run on nodes that are specifically dedicated to m3db if you so - choose. -2. Via `nodeAffinity` the pods prefer to run on nodes with the label `m3db.io/dedicated-m3db="true"`. - -[kernel]: /operational_guide/kernel_configuration diff --git a/site/content/docs/overview/roadmap.md b/site/content/docs/overview/roadmap.md deleted file mode 100644 index 629b300581..0000000000 --- a/site/content/docs/overview/roadmap.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: "Roadmap" -weight: 4 ---- - -This roadmap is open for suggestions and currently just a small snapshot of what is coming up. 
- -Short: -- Add diagrams of what using M3 looks like (broken down by use case) -- Improve operational guides for the aggregator -- Add tutorials for a variety of use cases -- Add design documentation of reverse index -- Add design documentation of aggregator - -Medium: -- Plan what a v1.0 release looks like diff --git a/site/content/docs/faqs/_index.md b/site/content/faqs/_index.md similarity index 100% rename from site/content/docs/faqs/_index.md rename to site/content/faqs/_index.md diff --git a/site/content/docs/how_to/_index.md b/site/content/how_to/_index.md similarity index 100% rename from site/content/docs/how_to/_index.md rename to site/content/how_to/_index.md diff --git a/site/content/docs/how_to/aggregator.md b/site/content/how_to/aggregator.md similarity index 99% rename from site/content/docs/how_to/aggregator.md rename to site/content/how_to/aggregator.md index d20a5fe629..807ec21679 100644 --- a/site/content/docs/how_to/aggregator.md +++ b/site/content/how_to/aggregator.md @@ -1,11 +1,9 @@ --- -title: Setting up M3Aggregator +title: Setting up M3 Aggregator menuTitle: M3Aggregator weight: 5 --- - - ## Introduction `m3aggregator` is used to cluster stateful downsampling and rollup of metrics before they are store in M3DB. The M3 Coordinator also performs this role but is not cluster aware. This means metrics will not get aggregated properly if you send metrics in round robin fashion to multiple M3 Coordinators for the same metrics ingestion source (e.g. Prometheus server). 
diff --git a/site/content/docs/how_to/cluster_hard_way.md b/site/content/how_to/cluster_hard_way.md similarity index 100% rename from site/content/docs/how_to/cluster_hard_way.md rename to site/content/how_to/cluster_hard_way.md diff --git a/site/content/docs/how_to/grafana.md b/site/content/how_to/grafana.md similarity index 100% rename from site/content/docs/how_to/grafana.md rename to site/content/how_to/grafana.md diff --git a/site/content/docs/how_to/graphite.md b/site/content/how_to/graphite.md similarity index 100% rename from site/content/docs/how_to/graphite.md rename to site/content/how_to/graphite.md diff --git a/site/content/docs/how_to/monitoring_m3/_index.md b/site/content/how_to/monitoring_m3/_index.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/_index.md rename to site/content/how_to/monitoring_m3/_index.md diff --git a/site/content/docs/how_to/monitoring_m3/alerts.md b/site/content/how_to/monitoring_m3/alerts.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/alerts.md rename to site/content/how_to/monitoring_m3/alerts.md diff --git a/site/content/docs/how_to/monitoring_m3/metrics.md b/site/content/how_to/monitoring_m3/metrics.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/metrics.md rename to site/content/how_to/monitoring_m3/metrics.md diff --git a/site/content/docs/how_to/monitoring_m3/tracing.md b/site/content/how_to/monitoring_m3/tracing.md similarity index 100% rename from site/content/docs/how_to/monitoring_m3/tracing.md rename to site/content/how_to/monitoring_m3/tracing.md diff --git a/site/content/docs/how_to/other/_index.md b/site/content/how_to/other/_index.md similarity index 100% rename from site/content/docs/how_to/other/_index.md rename to site/content/how_to/other/_index.md diff --git a/site/content/docs/how_to/other/tsdb.md b/site/content/how_to/other/tsdb.md similarity index 100% rename from site/content/docs/how_to/other/tsdb.md rename to 
site/content/how_to/other/tsdb.md diff --git a/site/content/docs/how_to/other/upgrading.md b/site/content/how_to/other/upgrading.md similarity index 100% rename from site/content/docs/how_to/other/upgrading.md rename to site/content/how_to/other/upgrading.md diff --git a/site/content/docs/how_to/prometheus.md b/site/content/how_to/prometheus.md similarity index 100% rename from site/content/docs/how_to/prometheus.md rename to site/content/how_to/prometheus.md diff --git a/site/content/docs/how_to/query.md b/site/content/how_to/query.md similarity index 92% rename from site/content/docs/how_to/query.md rename to site/content/how_to/query.md index 6e30a85a9b..41f843feca 100644 --- a/site/content/docs/how_to/query.md +++ b/site/content/how_to/query.md @@ -1,11 +1,10 @@ --- -title: Setting up m3query +title: Setting up M3 Query menuTitle: M3Query weight: 4 --- - -m3query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](/docs/integrations/prometheus), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](/docs/m3query/architecture/blocks/). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana. +M3 Query is used to query data that is stored in M3DB. For instance, if you are using the Prometheus remote write endpoint with [m3coordinator](/docs/integrations/prometheus), you can use m3query instead of the Prometheus remote read endpoint. By doing so, you get all of the benefits of m3query's engine such as [block processing](/docs/m3query/architecture/blocks/). Furthermore, since m3query provides a Prometheus compatible API, you can use 3rd party graphing and alerting solutions like Grafana. 
## Configuration diff --git a/site/content/docs/how_to/use_as_tsdb.md b/site/content/how_to/use_as_tsdb.md similarity index 98% rename from site/content/docs/how_to/use_as_tsdb.md rename to site/content/how_to/use_as_tsdb.md index ba67d35fef..53a5da02be 100644 --- a/site/content/docs/how_to/use_as_tsdb.md +++ b/site/content/how_to/use_as_tsdb.md @@ -114,7 +114,7 @@ For more details on the compression scheme and its limitations, review [the docu #### M3DB setup -For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/how_to/kubernetes). However, this tutorial will walk you through configuring a single node setup locally for development. +For more advanced setups, it's best to follow the guides on how to configure an M3DB cluster [manually](/docs/how_to/cluster_hard_way) or [using Kubernetes](/docs/operator). However, this tutorial will walk you through configuring a single node setup locally for development. 
First, run the following command to pull the latest M3DB image: diff --git a/site/content/docs/includes/headers_optional_read_all.md b/site/content/includes/headers_optional_read_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_all.md rename to site/content/includes/headers_optional_read_all.md diff --git a/site/content/docs/includes/headers_optional_read_limits.md b/site/content/includes/headers_optional_read_limits.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_limits.md rename to site/content/includes/headers_optional_read_limits.md diff --git a/site/content/docs/includes/headers_optional_read_write.md b/site/content/includes/headers_optional_read_write.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_write.md rename to site/content/includes/headers_optional_read_write.md diff --git a/site/content/docs/includes/headers_optional_read_write_all.md b/site/content/includes/headers_optional_read_write_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_read_write_all.md rename to site/content/includes/headers_optional_read_write_all.md diff --git a/site/content/docs/includes/headers_optional_write_all.md b/site/content/includes/headers_optional_write_all.md similarity index 100% rename from site/content/docs/includes/headers_optional_write_all.md rename to site/content/includes/headers_optional_write_all.md diff --git a/site/content/docs/includes/headers_placement_namespace.md b/site/content/includes/headers_placement_namespace.md similarity index 100% rename from site/content/docs/includes/headers_placement_namespace.md rename to site/content/includes/headers_placement_namespace.md diff --git a/site/content/docs/includes/index.md b/site/content/includes/index.md similarity index 100% rename from site/content/docs/includes/index.md rename to site/content/includes/index.md diff --git 
a/site/content/docs/includes/podtemplate.json b/site/content/includes/podtemplate.json similarity index 100% rename from site/content/docs/includes/podtemplate.json rename to site/content/includes/podtemplate.json diff --git a/site/content/docs/includes/quickstart/create-database.sh b/site/content/includes/quickstart/create-database.sh similarity index 100% rename from site/content/docs/includes/quickstart/create-database.sh rename to site/content/includes/quickstart/create-database.sh diff --git a/site/content/docs/includes/quickstart/write-metrics-1.sh b/site/content/includes/quickstart/write-metrics-1.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-1.sh rename to site/content/includes/quickstart/write-metrics-1.sh diff --git a/site/content/docs/includes/quickstart/write-metrics-2.sh b/site/content/includes/quickstart/write-metrics-2.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-2.sh rename to site/content/includes/quickstart/write-metrics-2.sh diff --git a/site/content/docs/includes/quickstart/write-metrics-3.sh b/site/content/includes/quickstart/write-metrics-3.sh similarity index 100% rename from site/content/docs/includes/quickstart/write-metrics-3.sh rename to site/content/includes/quickstart/write-metrics-3.sh diff --git a/site/content/docs/_index.md b/site/content/index.md similarity index 97% rename from site/content/docs/_index.md rename to site/content/index.md index 708a9f53cc..79fd428af7 100644 --- a/site/content/docs/_index.md +++ b/site/content/index.md @@ -29,7 +29,7 @@ Getting started with M3 is as easy as following one of the How-To guides. 
- [Single M3DB node deployment](/docs/quickstart) - [Clustered M3DB deployment](/docs/how_to/cluster_hard_way) -- [M3DB on Kubernetes](/docs/how_to/kubernetes) +- [M3DB on Kubernetes](/docs/operator) - [Isolated M3Query on deployment](/docs/how_to/query) ## Support diff --git a/site/content/docs/integrations/_index.md b/site/content/integrations/_index.md similarity index 100% rename from site/content/docs/integrations/_index.md rename to site/content/integrations/_index.md diff --git a/site/content/docs/integrations/grafana.md b/site/content/integrations/grafana.md similarity index 100% rename from site/content/docs/integrations/grafana.md rename to site/content/integrations/grafana.md diff --git a/site/content/docs/integrations/graphite.md b/site/content/integrations/graphite.md similarity index 100% rename from site/content/docs/integrations/graphite.md rename to site/content/integrations/graphite.md diff --git a/site/content/docs/integrations/influx.md b/site/content/integrations/influx.md similarity index 100% rename from site/content/docs/integrations/influx.md rename to site/content/integrations/influx.md diff --git a/site/content/docs/integrations/prometheus.md b/site/content/integrations/prometheus.md similarity index 100% rename from site/content/docs/integrations/prometheus.md rename to site/content/integrations/prometheus.md diff --git a/site/content/docs/m3coordinator/_index.md b/site/content/m3coordinator/_index.md similarity index 98% rename from site/content/docs/m3coordinator/_index.md rename to site/content/m3coordinator/_index.md index e66a19c734..5d2a0d11dd 100644 --- a/site/content/docs/m3coordinator/_index.md +++ b/site/content/m3coordinator/_index.md @@ -1,7 +1,7 @@ --- title: "M3 Coordinator, API for reading/writing metrics and management" menuTitle: "M3 Coordinator" -weight: 4 +weight: 5 chapter: true --- diff --git a/site/content/docs/m3coordinator/api/remote.md b/site/content/m3coordinator/api/remote.md similarity index 100% rename from 
site/content/docs/m3coordinator/api/remote.md rename to site/content/m3coordinator/api/remote.md diff --git a/site/content/docs/m3db/_index.md b/site/content/m3db/_index.md similarity index 99% rename from site/content/docs/m3db/_index.md rename to site/content/m3db/_index.md index df5693f0cb..cd43f857b7 100644 --- a/site/content/docs/m3db/_index.md +++ b/site/content/m3db/_index.md @@ -1,7 +1,7 @@ --- title: "M3DB, a distributed time series database" menuTitle: "M3DB" -weight: 3 +weight: 4 chapter: true --- diff --git a/site/content/docs/m3db/architecture/_index.md b/site/content/m3db/architecture/_index.md similarity index 98% rename from site/content/docs/m3db/architecture/_index.md rename to site/content/m3db/architecture/_index.md index 06c8ea1023..3796fbcd83 100644 --- a/site/content/docs/m3db/architecture/_index.md +++ b/site/content/m3db/architecture/_index.md @@ -1,7 +1,6 @@ --- title: "Architecture" weight: 2 -chapter: true --- ## Overview diff --git a/site/content/docs/m3db/architecture/caching.md b/site/content/m3db/architecture/caching.md similarity index 100% rename from site/content/docs/m3db/architecture/caching.md rename to site/content/m3db/architecture/caching.md diff --git a/site/content/docs/m3db/architecture/commitlogs.md b/site/content/m3db/architecture/commitlogs.md similarity index 100% rename from site/content/docs/m3db/architecture/commitlogs.md rename to site/content/m3db/architecture/commitlogs.md diff --git a/site/content/docs/m3db/architecture/consistencylevels.md b/site/content/m3db/architecture/consistencylevels.md similarity index 100% rename from site/content/docs/m3db/architecture/consistencylevels.md rename to site/content/m3db/architecture/consistencylevels.md diff --git a/site/content/docs/m3db/architecture/engine.md b/site/content/m3db/architecture/engine.md similarity index 100% rename from site/content/docs/m3db/architecture/engine.md rename to site/content/m3db/architecture/engine.md diff --git 
a/site/content/docs/m3db/architecture/peer_streaming.md b/site/content/m3db/architecture/peer_streaming.md similarity index 100% rename from site/content/docs/m3db/architecture/peer_streaming.md rename to site/content/m3db/architecture/peer_streaming.md diff --git a/site/content/docs/m3db/architecture/sharding.md b/site/content/m3db/architecture/sharding.md similarity index 100% rename from site/content/docs/m3db/architecture/sharding.md rename to site/content/m3db/architecture/sharding.md diff --git a/site/content/docs/m3db/architecture/storage.md b/site/content/m3db/architecture/storage.md similarity index 100% rename from site/content/docs/m3db/architecture/storage.md rename to site/content/m3db/architecture/storage.md diff --git a/site/content/docs/m3db/monodraw/m3db-file-layout.monopic b/site/content/m3db/monodraw/m3db-file-layout.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/m3db-file-layout.monopic rename to site/content/m3db/monodraw/m3db-file-layout.monopic diff --git a/site/content/docs/m3db/monodraw/m3db_structs.monopic b/site/content/m3db/monodraw/m3db_structs.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/m3db_structs.monopic rename to site/content/m3db/monodraw/m3db_structs.monopic diff --git a/site/content/docs/m3db/monodraw/peer_bootstrap.monopic b/site/content/m3db/monodraw/peer_bootstrap.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/peer_bootstrap.monopic rename to site/content/m3db/monodraw/peer_bootstrap.monopic diff --git a/site/content/docs/m3db/monodraw/placement_monodraw_template.monopic b/site/content/m3db/monodraw/placement_monodraw_template.monopic similarity index 100% rename from site/content/docs/m3db/monodraw/placement_monodraw_template.monopic rename to site/content/m3db/monodraw/placement_monodraw_template.monopic diff --git a/site/content/docs/m3db/monodraw/placement_state_machine.monopic b/site/content/m3db/monodraw/placement_state_machine.monopic 
similarity index 100% rename from site/content/docs/m3db/monodraw/placement_state_machine.monopic rename to site/content/m3db/monodraw/placement_state_machine.monopic diff --git a/site/content/docs/m3query/_index.md b/site/content/m3query/_index.md similarity index 100% rename from site/content/docs/m3query/_index.md rename to site/content/m3query/_index.md diff --git a/site/content/docs/m3query/api/query.md b/site/content/m3query/api/query.md similarity index 100% rename from site/content/docs/m3query/api/query.md rename to site/content/m3query/api/query.md diff --git a/site/content/docs/m3query/architecture/_index.md b/site/content/m3query/architecture/_index.md similarity index 100% rename from site/content/docs/m3query/architecture/_index.md rename to site/content/m3query/architecture/_index.md diff --git a/site/content/docs/m3query/architecture/blocks.md b/site/content/m3query/architecture/blocks.md similarity index 100% rename from site/content/docs/m3query/architecture/blocks.md rename to site/content/m3query/architecture/blocks.md diff --git a/site/content/docs/m3query/architecture/fanout.md b/site/content/m3query/architecture/fanout.md similarity index 100% rename from site/content/docs/m3query/architecture/fanout.md rename to site/content/m3query/architecture/fanout.md diff --git a/site/content/docs/m3query/architecture/functions.md b/site/content/m3query/architecture/functions.md similarity index 100% rename from site/content/docs/m3query/architecture/functions.md rename to site/content/m3query/architecture/functions.md diff --git a/site/content/docs/m3query/config/_index.md b/site/content/m3query/config/_index.md similarity index 100% rename from site/content/docs/m3query/config/_index.md rename to site/content/m3query/config/_index.md diff --git a/site/content/docs/m3query/config/annotated_config.md b/site/content/m3query/config/annotated_config.md similarity index 100% rename from site/content/docs/m3query/config/annotated_config.md rename to 
site/content/m3query/config/annotated_config.md diff --git a/site/content/docs/m3query/config/annotated_config.yaml b/site/content/m3query/config/annotated_config.yaml similarity index 100% rename from site/content/docs/m3query/config/annotated_config.yaml rename to site/content/m3query/config/annotated_config.yaml diff --git a/site/content/docs/operational_guide/_index.md b/site/content/operational_guide/_index.md similarity index 100% rename from site/content/docs/operational_guide/_index.md rename to site/content/operational_guide/_index.md diff --git a/site/content/docs/operational_guide/availability_consistency_durability.md b/site/content/operational_guide/availability_consistency_durability.md similarity index 100% rename from site/content/docs/operational_guide/availability_consistency_durability.md rename to site/content/operational_guide/availability_consistency_durability.md diff --git a/site/content/docs/operational_guide/bootstrapping_crash_recovery.md b/site/content/operational_guide/bootstrapping_crash_recovery.md similarity index 100% rename from site/content/docs/operational_guide/bootstrapping_crash_recovery.md rename to site/content/operational_guide/bootstrapping_crash_recovery.md diff --git a/site/content/docs/operational_guide/etcd.md b/site/content/operational_guide/etcd.md similarity index 100% rename from site/content/docs/operational_guide/etcd.md rename to site/content/operational_guide/etcd.md diff --git a/site/content/docs/operational_guide/fileset_migrations.md b/site/content/operational_guide/fileset_migrations.md similarity index 100% rename from site/content/docs/operational_guide/fileset_migrations.md rename to site/content/operational_guide/fileset_migrations.md diff --git a/site/content/docs/operational_guide/kernel_configuration.md b/site/content/operational_guide/kernel_configuration.md similarity index 100% rename from site/content/docs/operational_guide/kernel_configuration.md rename to 
site/content/operational_guide/kernel_configuration.md diff --git a/site/content/docs/operational_guide/mapping_rollup.md b/site/content/operational_guide/mapping_rollup.md similarity index 100% rename from site/content/docs/operational_guide/mapping_rollup.md rename to site/content/operational_guide/mapping_rollup.md diff --git a/site/content/docs/operational_guide/monitoring.md b/site/content/operational_guide/monitoring.md similarity index 100% rename from site/content/docs/operational_guide/monitoring.md rename to site/content/operational_guide/monitoring.md diff --git a/site/content/docs/operational_guide/multiple_m3db_clusters.md b/site/content/operational_guide/multiple_m3db_clusters.md similarity index 100% rename from site/content/docs/operational_guide/multiple_m3db_clusters.md rename to site/content/operational_guide/multiple_m3db_clusters.md diff --git a/site/content/docs/operational_guide/namespace_configuration.md b/site/content/operational_guide/namespace_configuration.md similarity index 100% rename from site/content/docs/operational_guide/namespace_configuration.md rename to site/content/operational_guide/namespace_configuration.md diff --git a/site/content/docs/operational_guide/namespace_mgmt.md b/site/content/operational_guide/namespace_mgmt.md similarity index 100% rename from site/content/docs/operational_guide/namespace_mgmt.md rename to site/content/operational_guide/namespace_mgmt.md diff --git a/site/content/docs/operational_guide/placement.md b/site/content/operational_guide/placement.md similarity index 100% rename from site/content/docs/operational_guide/placement.md rename to site/content/operational_guide/placement.md diff --git a/site/content/docs/operational_guide/placement_configuration.md b/site/content/operational_guide/placement_configuration.md similarity index 100% rename from site/content/docs/operational_guide/placement_configuration.md rename to site/content/operational_guide/placement_configuration.md diff --git 
a/site/content/docs/operational_guide/repairs.md b/site/content/operational_guide/repairs.md similarity index 100% rename from site/content/docs/operational_guide/repairs.md rename to site/content/operational_guide/repairs.md diff --git a/site/content/docs/operational_guide/replication_and_deployment_in_zones.md b/site/content/operational_guide/replication_and_deployment_in_zones.md similarity index 100% rename from site/content/docs/operational_guide/replication_and_deployment_in_zones.md rename to site/content/operational_guide/replication_and_deployment_in_zones.md diff --git a/site/content/docs/operational_guide/replication_between_clusters.md b/site/content/operational_guide/replication_between_clusters.md similarity index 100% rename from site/content/docs/operational_guide/replication_between_clusters.md rename to site/content/operational_guide/replication_between_clusters.md diff --git a/site/content/docs/operational_guide/replication_global.png b/site/content/operational_guide/replication_global.png similarity index 100% rename from site/content/docs/operational_guide/replication_global.png rename to site/content/operational_guide/replication_global.png diff --git a/site/content/docs/operational_guide/replication_region.png b/site/content/operational_guide/replication_region.png similarity index 100% rename from site/content/docs/operational_guide/replication_region.png rename to site/content/operational_guide/replication_region.png diff --git a/site/content/docs/operational_guide/replication_single_zone.png b/site/content/operational_guide/replication_single_zone.png similarity index 100% rename from site/content/docs/operational_guide/replication_single_zone.png rename to site/content/operational_guide/replication_single_zone.png diff --git a/site/content/docs/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md similarity index 100% rename from site/content/docs/operational_guide/resource_limits.md rename to 
site/content/operational_guide/resource_limits.md diff --git a/site/content/docs/operational_guide/upgrading_m3.md b/site/content/operational_guide/upgrading_m3.md similarity index 100% rename from site/content/docs/operational_guide/upgrading_m3.md rename to site/content/operational_guide/upgrading_m3.md diff --git a/site/content/operator/_index.md b/site/content/operator/_index.md new file mode 100644 index 0000000000..e6adbca423 --- /dev/null +++ b/site/content/operator/_index.md @@ -0,0 +1,6 @@ +--- +title: "Kubernetes Operator" +menuTitle: "Kubernetes Operator" +weight: 3 +chapter: true +--- diff --git a/site/content/operator/api.md b/site/content/operator/api.md new file mode 100644 index 0000000000..6c699f08df --- /dev/null +++ b/site/content/operator/api.md @@ -0,0 +1,277 @@ +--- +title: "API" +menuTitle: "API" +weight: 4 +chapter: true +--- + +This document enumerates the Custom Resource Definitions used by the M3DB Operator. It is auto-generated from code comments. + +## Table of Contents +* [ClusterCondition](#clustercondition) +* [ClusterSpec](#clusterspec) +* [ExternalCoordinatorConfig](#externalcoordinatorconfig) +* [IsolationGroup](#isolationgroup) +* [M3DBCluster](#m3dbcluster) +* [M3DBClusterList](#m3dbclusterlist) +* [M3DBStatus](#m3dbstatus) +* [NodeAffinityTerm](#nodeaffinityterm) +* [AggregatedAttributes](#aggregatedattributes) +* [Aggregation](#aggregation) +* [AggregationOptions](#aggregationoptions) +* [DownsampleOptions](#downsampleoptions) +* [IndexOptions](#indexoptions) +* [Namespace](#namespace) +* [NamespaceOptions](#namespaceoptions) +* [RetentionOptions](#retentionoptions) +* [PodIdentity](#podidentity) +* [PodIdentityConfig](#podidentityconfig) + +## ClusterCondition + +ClusterCondition represents various conditions the cluster can be in. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| type | Type of cluster condition. 
| ClusterConditionType | false | +| status | Status of the condition (True, False, Unknown). | corev1.ConditionStatus | false | +| lastUpdateTime | Last time this condition was updated. | string | false | +| lastTransitionTime | Last time this condition transitioned from one status to another. | string | false | +| reason | Reason this condition last changed. | string | false | +| message | Human-friendly message about this condition. | string | false | + +[Back to TOC](#table-of-contents) + +## ClusterSpec + +ClusterSpec defines the desired state for a M3 cluster to be converge to. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| image | Image specifies which docker image to use with the cluster | string | false | +| replicationFactor | ReplicationFactor defines how many replicas | int32 | false | +| numberOfShards | NumberOfShards defines how many shards in total | int32 | false | +| isolationGroups | IsolationGroups specifies a map of key-value pairs. Defines which isolation groups to deploy persistent volumes for data nodes | [][IsolationGroup](#isolationgroup) | false | +| namespaces | Namespaces specifies the namespaces this cluster will hold. | [][Namespace](#namespace) | false | +| etcdEndpoints | EtcdEndpoints defines the etcd endpoints to use for service discovery. Must be set if no custom configmap is defined. If set, etcd endpoints will be templated in to the default configmap template. | []string | false | +| keepEtcdDataOnDelete | KeepEtcdDataOnDelete determines whether the operator will remove cluster metadata (placement + namespaces) in etcd when the cluster is deleted. Unless true, etcd data will be cleared when the cluster is deleted. | bool | false | +| enableCarbonIngester | EnableCarbonIngester enables the listener port for the carbon ingester | bool | false | +| configMapName | ConfigMapName specifies the ConfigMap to use for this cluster. 
If unset a default configmap with template variables for etcd endpoints will be used. See \"Configuring M3DB\" in the docs for more. | *string | false | +| podIdentityConfig | PodIdentityConfig sets the configuration for pod identity. If unset only pod name and UID will be used. | *PodIdentityConfig | false | +| containerResources | Resources defines memory / cpu constraints for each container in the cluster. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#resourcerequirements-v1-core) | false | +| dataDirVolumeClaimTemplate | DataDirVolumeClaimTemplate is the volume claim template for an M3DB instance's data. It claims PersistentVolumes for cluster storage, volumes are dynamically provisioned by when the StorageClass is defined. | *[corev1.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#persistentvolumeclaim-v1-core) | false | +| podSecurityContext | PodSecurityContext allows the user to specify an optional security context for pods. | *corev1.PodSecurityContext | false | +| securityContext | SecurityContext allows the user to specify a container-level security context. | *corev1.SecurityContext | false | +| imagePullSecrets | ImagePullSecrets will be added to every pod. | [][corev1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#localobjectreference-v1-core) | false | +| envVars | EnvVars defines custom environment variables to be passed to M3DB containers. | []corev1.EnvVar | false | +| labels | Labels sets the base labels that will be applied to resources created by the cluster. // TODO(schallert): design doc on labeling scheme. | map[string]string | false | +| annotations | Annotations sets the base annotations that will be applied to resources created by the cluster. | map[string]string | false | +| tolerations | Tolerations sets the tolerations that will be applied to all M3DB pods. 
| []corev1.Toleration | false | +| priorityClassName | PriorityClassName sets the priority class for all M3DB pods. | string | false | +| nodeEndpointFormat | NodeEndpointFormat allows overriding of the endpoint used for a node in the M3DB placement. Defaults to \"{{ .PodName }}.{{ .M3DBService }}:{{ .Port }}\". Useful if access to the cluster from other namespaces is desired. See \"Node Endpoint\" docs for full variables available. | string | false | +| hostNetwork | HostNetwork indicates whether M3DB pods should run in the same network namespace as the node its on. This option should be used sparingly due to security concerns outlined in the linked documentation. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces | bool | false | +| dnsPolicy | DNSPolicy allows the user to set the pod's DNSPolicy. This is often used in conjunction with HostNetwork.+optional | *corev1.DNSPolicy | false | +| externalCoordinator | Specify a \"controlling\" coordinator for the cluster. | *[ExternalCoordinatorConfig](#externalcoordinatorconfig) | false | +| initContainers | Custom setup for db nodes can be done via initContainers Provide the complete spec for the initContainer here If any storage volumes are needed in the initContainer see InitVolumes below | []corev1.Container | false | +| initVolumes | If the InitContainers require any storage volumes Provide the complete specification for the required Volumes here | []corev1.Volume | false | +| podMetadata | PodMetadata is for any Metadata that is unique to the pods, and does not belong on any other objects, such as Prometheus scrape tags | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#objectmeta-v1-meta) | false | +| parallelPodManagement | ParallelPodManagement sets StatefulSets created by the operator to have Parallel pod management instead of OrderedReady. If nil, this will default to true. 
| *bool | true | +| serviceAccountName | To use a non-default service account, specify the name here otherwise the service account \"default\" will be used. This is useful for advanced use-cases such as pod security policies. The service account must exist. This operator will not create it. | string | false | +| frozen | Frozen is used to stop the operator from taking any further actions on a cluster. This is useful when troubleshooting as it guarantees the operator won't make any changes to the cluster. | bool | false | + +[Back to TOC](#table-of-contents) + +## ExternalCoordinatorConfig + +ExternalCoordinatorConfig defines parameters for using an external coordinator to control the cluster.\n\n- It is expected that there is a separate standalone coordinator cluster. - It is externally managed - not managed by this operator. - It is expected to have a service endpoint.\n\nSetup this db cluster, but do not assume a co-located coordinator. Instead provide a selector here so we can point to a separate coordinator service. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| selector | | map[string]string | true | +| serviceEndpoint | | string | false | + +[Back to TOC](#table-of-contents) + +## IsolationGroup + +IsolationGroup defines the name of zone as well attributes for the zone configuration + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | Name is the value that will be used in StatefulSet labels, pod labels, and M3DB placement \"isolationGroup\" fields. | string | true | +| nodeAffinityTerms | NodeAffinityTerms is an array of NodeAffinityTerm requirements, which are ANDed together to indicate what nodes an isolation group can be assigned to. | [][NodeAffinityTerm](#nodeaffinityterm) | false | +| numInstances | NumInstances defines the number of instances. 
| int32 | true | +| storageClassName | StorageClassName is the name of the StorageClass to use for this isolation group. This allows ensuring that PVs will be created in the same zone as the pinned statefulset on Kubernetes < 1.12 (when topology aware volume scheduling was introduced). Only has effect if the clusters `dataDirVolumeClaimTemplate` is non-nil. If set, the volume claim template will have its storageClassName field overridden per-isolationgroup. If unset the storageClassName of the volumeClaimTemplate will be used. | string | false | + +[Back to TOC](#table-of-contents) + +## M3DBCluster + +M3DBCluster defines the cluster + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#objectmeta-v1-meta) | false | +| type | | string | true | +| spec | | [ClusterSpec](#clusterspec) | true | +| status | | [M3DBStatus](#m3dbstatus) | false | + +[Back to TOC](#table-of-contents) + +## M3DBClusterList + +M3DBClusterList represents a list of M3DB Clusters + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#listmeta-v1-meta) | false | +| items | | [][M3DBCluster](#m3dbcluster) | true | + +[Back to TOC](#table-of-contents) + +## M3DBStatus + +M3DBStatus contains the current state the M3DB cluster along with a human readable message + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| state | State is a enum of green, yellow, and red denoting the health of the cluster | M3DBState | false | +| conditions | Various conditions about the cluster. 
| [][ClusterCondition](#clustercondition) | false | +| message | Message is a human readable message indicating why the cluster is in it's current state | string | false | +| observedGeneration | ObservedGeneration is the last generation of the cluster the controller observed. Kubernetes will automatically increment metadata.Generation every time the cluster spec is changed. | int64 | false | + +[Back to TOC](#table-of-contents) + +## NodeAffinityTerm + +NodeAffinityTerm represents a node label and a set of label values, any of which can be matched to assign a pod to a node. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| key | Key is the label of the node. | string | true | +| values | Values is an array of values, any of which a node can have for a pod to be assigned to it. | []string | true | + +[Back to TOC](#table-of-contents) + +## AggregatedAttributes + +AggregatedAttributes are attributes specifying how data points are aggregated. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| resolution | Resolution is the time range to aggregate data across. | string | false | +| downsampleOptions | DownsampleOptions stores options for downsampling data points. | *[DownsampleOptions](#downsampleoptions) | false | + +[Back to TOC](#table-of-contents) + +## Aggregation + +Aggregation describes data points within a namespace. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| aggregated | Aggregated indicates whether data points are aggregated or not. | bool | false | +| attributes | Attributes defines how data is aggregated when Aggregated is set to true. This field is ignored when aggregated is false. | [AggregatedAttributes](#aggregatedattributes) | false | + +[Back to TOC](#table-of-contents) + +## AggregationOptions + +AggregationOptions is a set of options for aggregating data within the namespace. 
+ +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| aggregations | Aggregations are the aggregations for a namespace. | [][Aggregation](#aggregation) | false | + +[Back to TOC](#table-of-contents) + +## DownsampleOptions + +DownsampleOptions is a set of options related to downsampling data. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| all | All indicates whether to send data points to this namespace. If set to false, this namespace will not receive data points. In this case, data will need to be sent to the namespace via another mechanism (e.g. rollup/recording rules). | bool | false | + +[Back to TOC](#table-of-contents) + +## IndexOptions + +IndexOptions defines parameters for indexing. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| enabled | Enabled controls whether metric indexing is enabled. | bool | false | +| blockSize | BlockSize controls the index block size. | string | false | + +[Back to TOC](#table-of-contents) + +## Namespace + +Namespace defines an M3DB namespace or points to a preset M3DB namespace. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | Name is the namespace name. | string | false | +| preset | Preset indicates preset namespace options. | string | false | +| options | Options points to optional custom namespace configuration. | *[NamespaceOptions](#namespaceoptions) | false | + +[Back to TOC](#table-of-contents) + +## NamespaceOptions + +NamespaceOptions defines parameters for an M3DB namespace. See https://m3db.github.io/m3/operational_guide/namespace_configuration/ for more details. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| bootstrapEnabled | BootstrapEnabled control if bootstrapping is enabled. | bool | false | +| flushEnabled | FlushEnabled controls whether flushing is enabled. 
| bool | false | +| writesToCommitLog | WritesToCommitLog controls whether commit log writes are enabled. | bool | false | +| cleanupEnabled | CleanupEnabled controls whether cleanups are enabled. | bool | false | +| repairEnabled | RepairEnabled controls whether repairs are enabled. | bool | false | +| snapshotEnabled | SnapshotEnabled controls whether snapshotting is enabled. | bool | false | +| retentionOptions | RetentionOptions sets the retention parameters. | [RetentionOptions](#retentionoptions) | false | +| indexOptions | IndexOptions sets the indexing parameters. | [IndexOptions](#indexoptions) | false | +| coldWritesEnabled | ColdWritesEnabled controls whether cold writes are enabled. | bool | false | +| aggregationOptions | AggregationOptions sets the aggregation parameters. | [AggregationOptions](#aggregationoptions) | false | + +[Back to TOC](#table-of-contents) + +## RetentionOptions + +RetentionOptions defines parameters for data retention. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| retentionPeriod | RetentionPeriod controls how long data for the namespace is retained. | string | false | +| blockSize | BlockSize controls the block size for the namespace. | string | false | +| bufferFuture | BufferFuture controls how far in the future metrics can be written. | string | false | +| bufferPast | BufferPast controls how far in the past metrics can be written. | string | false | +| blockDataExpiry | BlockDataExpiry controls the block expiry. | bool | false | +| blockDataExpiryAfterNotAccessPeriod | BlockDataExpiry controls the not after access period for expiration. | string | false | + +[Back to TOC](#table-of-contents) + +## PodIdentity + +PodIdentity contains all the fields that may be used to identify a pod's identity in the M3DB placement. Any non-empty fields will be used to identity uniqueness of a pod for the purpose of M3DB replace operations. 
+ +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | | string | false | +| uid | | string | false | +| nodeName | | string | false | +| nodeExternalID | | string | false | +| nodeProviderID | | string | false | + +[Back to TOC](#table-of-contents) + +## PodIdentityConfig + +PodIdentityConfig contains cluster-level configuration for deriving pod identity. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| sources | Sources enumerates the sources from which to derive pod identity. Note that a pod's name will always be used. If empty, defaults to pod name and UID. | []PodIdentitySource | true | + +[Back to TOC](#table-of-contents) diff --git a/site/content/operator/configuration/_index.md b/site/content/operator/configuration/_index.md new file mode 100644 index 0000000000..253cacbb27 --- /dev/null +++ b/site/content/operator/configuration/_index.md @@ -0,0 +1,5 @@ +--- +title: "Configuration" +weight: 3 +chapter: true +--- \ No newline at end of file diff --git a/site/content/operator/configuration/configuring_m3db.md b/site/content/operator/configuration/configuring_m3db.md new file mode 100644 index 0000000000..3392eadcee --- /dev/null +++ b/site/content/operator/configuration/configuring_m3db.md @@ -0,0 +1,34 @@ +--- +title: "Configuring M3DB" +menuTitle: "Configuring M3DB" +weight: 10 +chapter: true +--- + +By default the operator will apply a configmap with basic M3DB options and settings for the coordinator to direct +Prometheus reads/writes to the cluster. This template can be found +[here](https://github.com/m3db/m3db-operator/blob/master/assets/default-config.tmpl). + +To apply a custom configuration for the M3DB cluster, one can set the `configMapName` parameter of the cluster [spec] to +an existing configmap. 
+ +## Environment Warning + +If providing a custom config map, the `env` you specify in your [config][config] **must** be `$NAMESPACE/$NAME`, where +`$NAMESPACE` is the Kubernetes namespace your cluster is in and `$NAME` is the name of the cluster. For example, with +the following cluster: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: cluster-a + namespace: production +... +``` + +The value of `env` in your config **MUST** be `production/cluster-a`. This restriction allows multiple M3DB clusters to +safely share the same etcd cluster. + +[spec]: /docs/operator/api +[config]: https://github.com/m3db/m3db-operator/blob/795973f3329437ced3ac942da440810cd0865235/assets/default-config.yaml#L77 diff --git a/site/content/operator/configuration/namespaces.md b/site/content/operator/configuration/namespaces.md new file mode 100644 index 0000000000..feca9d58f2 --- /dev/null +++ b/site/content/operator/configuration/namespaces.md @@ -0,0 +1,72 @@ +--- +title: "Namespaces" +menuTitle: "Namespaces" +weight: 12 +chapter: true +--- + +M3DB uses the concept of [namespaces][m3db-namespaces] to determine how metrics are stored and retained. The M3DB +operator allows a user to define their own namespaces, or to use a set of presets we consider to be suitable for +production use cases. + +Namespaces are configured as part of an `m3dbcluster` [spec][api-namespaces]. + +## Presets + +### `10s:2d` + +This preset will store metrics at 10 second resolution for 2 days. For example, in your cluster spec: + +```yaml +spec: +... + namespaces: + - name: metrics-short-term + preset: 10s:2d +``` + +### `1m:40d` + +This preset will store metrics at 1 minute resolution for 40 days. + +```yaml +spec: +... + namespaces: + - name: metrics-long-term + preset: 1m:40d +``` + +## Custom Namespaces + +You can also define your own custom namespaces by setting the `NamespaceOptions` within a cluster spec. The +[API][api-ns-options] lists all available fields. 
As an example, a namespace to store 7 days of data may look like: +```yaml +... +spec: +... + namespaces: + - name: custom-7d + options: + bootstrapEnabled: true + flushEnabled: true + writesToCommitLog: true + cleanupEnabled: true + snapshotEnabled: true + repairEnabled: false + retentionOptions: + retentionPeriod: 168h + blockSize: 12h + bufferFuture: 20m + bufferPast: 20m + blockDataExpiry: true + blockDataExpiryAfterNotAccessPeriod: 5m + indexOptions: + enabled: true + blockSize: 12h +``` + + +[api-namespaces]: /docs/operator/api#namespace +[api-ns-options]: /docs/operator/api#namespaceoptions +[m3db-namespaces]: https://docs.m3db.io/operational_guide/namespace_configuration/ diff --git a/site/content/operator/configuration/node_affinity.md b/site/content/operator/configuration/node_affinity.md new file mode 100644 index 0000000000..460fb5dc4d --- --- /dev/null +++ b/site/content/operator/configuration/node_affinity.md @@ -0,0 +1,197 @@ +--- +title: "Node Affinity & Cluster Topology" +menuTitle: "Node Affinity" +weight: 13 +chapter: true +--- + +## Node Affinity + +Kubernetes allows pods to be assigned to nodes based on various criteria through [node affinity][k8s-node-affinity]. + +M3DB was built with failure tolerance as a core feature. M3DB's [isolation groups][m3db-isogroups] allow shards to be +placed across failure domains such that the loss of no single domain can cause the cluster to lose quorum. More details +on M3DB's resiliency can be found in the [deployment docs][m3db-deployment]. + +By leveraging Kubernetes' node affinity and M3DB's isolation groups, the operator can guarantee that M3DB pods are +distributed across failure domains. For example, in a Kubernetes cluster spread across 3 zones in a cloud region, the +`isolationGroups` configuration below would guarantee that no single zone failure could degrade the M3DB cluster. 
+ +M3DB is unaware of the underlying zone topology: it just views the isolation groups as `group1`, `group2`, `group3` in +its [placement][m3db-placement]. Thanks to the Kubernetes scheduler, however, these groups are actually scheduled across +separate failure domains. + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-d +``` + +## Tolerations + +In addition to allowing pods to be assigned to certain nodes via node affinity, Kubernetes allows pods to be _repelled_ +from nodes through [taints][k8s-taints] if they don't tolerate the taint. For example, the following config would ensure: + +1. Pods are spread across zones. + +2. Pods are only assigned to nodes in the `m3db-dedicated-pool` pool. + +3. No other pods could be assigned to those nodes (assuming they were tainted with the taint `m3db-dedicated-taint`). + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... 
+spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - key: nodepool + values: + - m3db-dedicated-pool + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - key: nodepool + values: + - m3db-dedicated-pool + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-d + - key: nodepool + values: + - m3db-dedicated-pool + tolerations: + - key: m3db-dedicated + effect: NoSchedule + operator: Exists +``` + +## Example Affinity Configurations + +### Zonal Cluster + +The examples so far have focused on multi-zone Kubernetes clusters. Some users may only have a cluster in a single zone +and accept the reduced fault tolerance. The following configuration shows how to configure the operator in a zonal +cluster. + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-b +``` + +### 6 Zone Cluster + +In the above examples we created clusters with 1 isolation group in each of 3 zones. Because `values` within a single +[NodeAffinityTerm][node-affinity-term] are OR'd, we can also spread an isolation group across multiple zones. For +example, if we had 6 zones available to us: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... 
+spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-a + - us-east1-b + - name: group2 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-c + - us-east1-d + - name: group3 + numInstances: 3 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-east1-e + - us-east1-f +``` + +### No Affinity + +If there are no failure domains available, one can have a cluster with no affinity where the pods will be scheduled however Kubernetes would place them by default: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +... +spec: + replicationFactor: 3 + isolationGroups: + - name: group1 + numInstances: 3 + - name: group2 + numInstances: 3 + - name: group3 + numInstances: 3 +``` + +[k8s-node-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[k8s-taints]: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +[m3db-deployment]: https://docs.m3db.io/operational_guide/replication_and_deployment_in_zones/ +[m3db-isogroups]: https://docs.m3db.io/operational_guide/placement_configuration/#isolation-group +[m3db-placement]: https://docs.m3db.io/operational_guide/placement/ +[node-affinity-term]: /docs/operator/api/#nodeaffinityterm diff --git a/site/content/operator/configuration/node_endpoint.md b/site/content/operator/configuration/node_endpoint.md new file mode 100644 index 0000000000..0afb147b2d --- /dev/null +++ b/site/content/operator/configuration/node_endpoint.md @@ -0,0 +1,32 @@ +--- +title: "Node Endpoint" +menuTitle: "Node Endpoint" +weight: 14 +chapter: true +--- + +M3DB stores an [`endpoint`][proto] field on placement instances that is used for communication between DB nodes and from +other components such as the coordinator. 
+ +The operator allows customizing the format of this endpoint by setting the `nodeEndpointFormat` field on a cluster spec. +The format of this field uses [Go templates], with the following template fields currently supported: + +| Field | Description | +| ----- | ----------- | +| `PodName` | Name of the pod | +| `M3DBService` | Name of the generated M3DB service | +| `PodNamespace` | Namespace the pod is in | +| `Port` | Port M3DB is serving RPCs on | + +The default format is: +``` +{{ .PodName }}.{{ .M3DBService }}:{{ .Port }} +``` + +As an example of an override, to expose an M3DB cluster to containers in other Kubernetes namespaces `nodeEndpointFormat` can be set to: +``` +{{ .PodName }}.{{ .M3DBService }}.{{ .PodNamespace }}:{{ .Port }} +``` + +[proto]: https://github.com/m3db/m3/blob/9b1dc3051a17620c0a983d60057a9a8c115af9d4/src/cluster/generated/proto/placementpb/placement.proto#L47 +[Go templates]: https://golang.org/pkg/text/template/ diff --git a/site/content/operator/configuration/pod_identity.md b/site/content/operator/configuration/pod_identity.md new file mode 100644 index 0000000000..b35a8722ff --- /dev/null +++ b/site/content/operator/configuration/pod_identity.md @@ -0,0 +1,69 @@ +--- +title: "Pod Identity" +menuTitle: "Pod Identity" +weight: 11 +chapter: true +--- + +## Motivation + +M3DB assumes that if a process is started and owns sealed shards marked as `Available` that its data for those shards is +valid and does not have to be fetched from peers. Consequentially this means it will begin serving reads for that data. +For more background on M3DB topology, see the [M3DB topology docs][topology-docs]. + +In most environments in which M3DB has been deployed in production, it has been on a set of hosts predetermined by +whomever is managing the cluster. 
This means that an M3DB instance is identified in a topology by its hostname, and that +when an M3DB process comes up and finds its hostname in the cluster with `Available` shards that it can serve reads for +those shards. + +This does not work on Kubernetes, particularly when working with StatefulSets, as a pod may be rescheduled on a new node +or with new storage attached but its name may stay the same. If we were to naively use an instance's hostname (pod +name), and it were to get rescheduled on a new node with no data, it could assume that absence of data is valid and +begin returning empty results for read requests. + +To account for this, the M3DB Operator determines an M3DB instance's identity in the topology based on a configurable +set of metadata about the pod. + +## Configuration + +The M3DB operator uses a configurable set of metadata about a pod to determine its identity in the M3DB placement. This +is encapsulated in the [PodIdentityConfig][pod-id-api] field of a cluster's spec. In addition to the configured sources, +a pod's name will always be included. + +Every pod in an M3DB cluster is annotated with its identity and is passed to the M3DB instance via a downward API +volume. + +### Sources + +This section will be filled out as a number of pending PRs land. + +## Recommendations + +### No Persistent Storage + +If not using PVs, you should set `sources` to `PodUID`: +``` +podIdentityConfig: + sources: + - PodUID +``` + +This way whenever a container is rescheduled, the operator will initiate a replace and it will stream data from its +peers before serving reads. Note that not having persistent storage is not a recommended way to run M3DB. + +### Remote Persistent Storage + +If using remote storage you do not need to set sources, as it will default to just the pod's name. The data for an M3DB +instance will move around with its container. + +### Local Persistent Storage + +If using persistent local volumes, you should set sources to `NodeName`. 
In this configuration M3DB will consider a pod +to be the same so long as it's on the same node. Replaces will only be triggered if a pod with the same name is moved to +a new host. + +Note that if using local SSDs on GKE, node names may stay the same even though a VM has been recreated. We also support +`ProviderID`, which will use the underlying VM's unique ID number in GCE to identify host uniqueness. + +[pod-id-api]: /docs/operator/api/#podidentityconfig +[topology-docs]: https://docs.m3db.io/operational_guide/placement/ diff --git a/site/content/operator/getting_started/_index.md b/site/content/operator/getting_started/_index.md new file mode 100644 index 0000000000..f8bb8e1357 --- /dev/null +++ b/site/content/operator/getting_started/_index.md @@ -0,0 +1,5 @@ +--- +title: "Getting Started" +weight: 2 +chapter: true +--- \ No newline at end of file diff --git a/site/content/operator/getting_started/create_cluster.md b/site/content/operator/getting_started/create_cluster.md new file mode 100644 index 0000000000..7973a41d95 --- /dev/null +++ b/site/content/operator/getting_started/create_cluster.md @@ -0,0 +1,178 @@ +--- +title: "Creating a Cluster" +menuTitle: "Creating a Cluster" +weight: 12 +chapter: true +--- + +Once you've [installed](/docs/operator/getting_started/installation) the M3DB operator and read over the [requirements](/docs/operator/getting_started/requirements), you can start +creating some M3DB clusters! + +## Basic Cluster + +The following creates an M3DB cluster spread across 3 zones, with each M3DB instance being able to store up to 350gb of +data using your Kubernetes cluster's default storage class. For examples of different cluster topologies, such as zonal +clusters, see the docs on [node affinity][node-affinity]. 
+ +### Etcd + +Create an etcd cluster with persistent volumes: +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-pd.yaml +``` + +We recommend modifying the `storageClassName` in the manifest to one that matches your cloud provider's fastest remote +storage option, such as `pd-ssd` on GCP. + +### M3DB + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: persistent-cluster +spec: + image: quay.io/m3db/m3dbnode:latest + replicationFactor: 3 + numberOfShards: 256 + isolationGroups: + - name: group1 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + - name: group2 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + - name: group3 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + etcdEndpoints: + - http://etcd-0.etcd:2379 + - http://etcd-1.etcd:2379 + - http://etcd-2.etcd:2379 + podIdentityConfig: + sources: [] + namespaces: + - name: metrics-10s:2d + preset: 10s:2d + dataDirVolumeClaimTemplate: + metadata: + name: m3db-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 350Gi + limits: + storage: 350Gi +``` + +## Ephemeral Cluster + +**WARNING:** This setup is not intended for production-grade clusters, but rather for "kicking the tires" with the +operator and M3DB. It is intended to work across almost any Kubernetes environment, and as such has as few dependencies +as possible (namely persistent storage). See below for instructions on creating a more durable cluster. + +### Etcd + +Create an etcd cluster in the same namespace your M3DB cluster will be created in. 
If you don't have persistent storage +available, this will create a cluster that will not use persistent storage and will likely become unavailable if any of +the pods die: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-basic.yaml + +# Verify etcd health once pods available +kubectl exec etcd-0 -- env ETCDCTL_API=3 etcdctl endpoint health +# 127.0.0.1:2379 is healthy: successfully committed proposal: took = 2.94668ms +``` + +If you have remote storage available and would like to jump straight to using it, apply the following manifest for etcd +instead: +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/v0.10.0/example/etcd/etcd-pd.yaml +``` + +### M3DB + +Once etcd is available, you can create an M3DB cluster. An example of a very basic M3DB cluster definition is as +follows: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + name: simple-cluster +spec: + image: quay.io/m3db/m3dbnode:latest + replicationFactor: 3 + numberOfShards: 256 + etcdEndpoints: + - http://etcd-0.etcd:2379 + - http://etcd-1.etcd:2379 + - http://etcd-2.etcd:2379 + isolationGroups: + - name: group1 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + - name: group2 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + - name: group3 + numInstances: 1 + nodeAffinityTerms: + - key: failure-domain.beta.kubernetes.io/zone + values: + - + podIdentityConfig: + sources: + - PodUID + namespaces: + - name: metrics-10s:2d + preset: 10s:2d +``` + +This will create a highly available cluster with RF=3 spread evenly across the three given zones within a region. A +pod's UID will be used for its [identity][pod-identity]. The cluster will have 1 [namespace](/docs/operator/configuration/namespaces) that stores +metrics for 2 days at 10s resolution. 
+ +Next, apply your manifest: +``` +$ kubectl apply -f example/simple-cluster.yaml +m3dbcluster.operator.m3db.io/simple-cluster created +``` + +Shortly after all pods are created you should see the cluster ready! + +``` +$ kubectl get po -l operator.m3db.io/app=m3db +NAME READY STATUS RESTARTS AGE +simple-cluster-rep0-0 1/1 Running 0 1m +simple-cluster-rep1-0 1/1 Running 0 56s +simple-cluster-rep2-0 1/1 Running 0 37s +``` + +We can verify that the cluster has finished streaming data from peers by checking that an instance has bootstrapped: +``` +$ kubectl exec simple-cluster-rep2-0 -- curl -sSf localhost:9002/health +{"ok":true,"status":"up","bootstrapped":true} +``` + +[pod-identity]: /docs/operator/configuration/pod_identity +[local-volumes]: https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/ +[node-affinity]: /docs/operator/configuration/node_affinity diff --git a/site/content/operator/getting_started/delete_cluster.md b/site/content/operator/getting_started/delete_cluster.md new file mode 100644 index 0000000000..d3fb8d64a4 --- /dev/null +++ b/site/content/operator/getting_started/delete_cluster.md @@ -0,0 +1,46 @@ +--- +title: "Deleting a Cluster" +menuTitle: "Deleting a Cluster" +weight: 14 +chapter: true +--- + +Delete your M3DB cluster with `kubectl`: +``` +kubectl delete m3dbcluster simple-cluster +``` + +By default, the operator will delete the placement and namespaces associated with a cluster before the CRD resource is +deleted. If you do **not** want this behavior, set `keepEtcdDataOnDelete` to `true` on your cluster spec. + +Under the hood, the operator uses Kubernetes [finalizers] to ensure the cluster CRD is not deleted until the operator +has had a chance to do cleanup. + +## Debugging Stuck Cluster Deletion + +If for some reason the operator is unable to delete the placement and namespace for the cluster, the cluster CRD itself +will be stuck in a state where it can not be deleted, due to the way finalizers work in Kubernetes. 
The operator might +be unable to clean up the data for many reasons, for example if the M3DB cluster itself is not available to serve the +APIs for cleanup or if etcd is down and cannot fulfill the deletion. + +To allow the CRD to be deleted, you can `kubectl edit m3dbcluster $CLUSTER` and remove the +`operator.m3db.io/etcd-deletion` finalizer. For example, in the following cluster you'd remove the finalizer from `metadata.finalizers`: + +```yaml +apiVersion: operator.m3db.io/v1alpha1 +kind: M3DBCluster +metadata: + ... + finalizers: + - operator.m3db.io/etcd-deletion + name: m3db-cluster +... +``` + +Note that if you do this, you'll have to manually remove the relevant data in etcd. For a cluster in namespace `$NS` +with name `$CLUSTER`, the keys are: + +- `_sd.placement/$NS/$CLUSTER/m3db` +- `_kv/$NS/$CLUSTER/m3db.node.namespaces` + +[finalizers]: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers diff --git a/site/content/operator/getting_started/installation.md b/site/content/operator/getting_started/installation.md new file mode 100644 index 0000000000..120353aa33 --- /dev/null +++ b/site/content/operator/getting_started/installation.md @@ -0,0 +1,34 @@ +--- +title: "Installation" +menuTitle: "Installation" +weight: 11 +chapter: true +--- + +Be sure to take a look at the [requirements](/docs/operator/getting_started/requirements) before installing the operator. + +## Helm + +1. Add the `m3db-operator` repo: + +``` +helm repo add m3db https://m3-helm-charts.storage.googleapis.com/stable +``` + +2. Install the `m3db-operator` chart: + +``` +helm install m3db-operator m3db/m3db-operator +``` + +**Note**: If uninstalling an instance of the operator that was installed with Helm, some resources such as the +ClusterRole, ClusterRoleBinding, and ServiceAccount may need to be deleted manually. 
+ + +## Manually + +Install the bundled operator manifests in the current namespace: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/bundle.yaml +``` diff --git a/site/content/operator/getting_started/monitoring.md b/site/content/operator/getting_started/monitoring.md new file mode 100644 index 0000000000..303a2bf0ba --- /dev/null +++ b/site/content/operator/getting_started/monitoring.md @@ -0,0 +1,19 @@ +--- +title: "Monitoring" +menuTitle: "Monitoring" +weight: 15 +chapter: true +--- + +M3DB exposes metrics via a Prometheus endpoint. If using the [Prometheus Operator][prometheus-operator], you can apply a +`ServiceMonitor` to have your M3DB pods automatically scraped by Prometheus: + +``` +kubectl apply -f https://raw.githubusercontent.com/m3db/m3db-operator/master/example/prometheus-servicemonitor.yaml +``` + +You can visit the "targets" page of the Prometheus UI to verify the pods are being scraped. To view these metrics using +Grafana, follow the [M3 docs][m3-grafana] to install the M3DB Grafana dashboard. + +[prometheus-operator]: https://github.com/coreos/prometheus-operator +[m3-grafana]: https://docs.m3db.io/integrations/grafana/ diff --git a/site/content/operator/getting_started/requirements.md b/site/content/operator/getting_started/requirements.md new file mode 100644 index 0000000000..ee6a51e6d7 --- /dev/null +++ b/site/content/operator/getting_started/requirements.md @@ -0,0 +1,37 @@ +--- +title: "Requirements" +menuTitle: "Requirements" +weight: 10 +chapter: true +--- + +## Kubernetes Versions + +The M3DB operator currently targets Kubernetes 1.11 and 1.12. Given the operator's current production use cases at Uber, +we typically target the two most recent minor Kubernetes versions supported by GKE. We welcome community contributions +to support more recent versions while meeting the aforementioned GKE targets! 
+ +## Multi-Zone Kubernetes Cluster + +The M3DB operator is intended to be used with Kubernetes clusters that span at least 3 zones within a region to create +highly available clusters and maintain quorum in the event of region failures. Instructions for creating regional +clusters on GKE can be found [here][gke-regional]. + +## Etcd + +M3DB stores its cluster topology and all other runtime metadata in [etcd][etcd]. + +For *testing / non-production use cases*, we provide simple manifests for running etcd on Kubernetes in our [example +manifests][etcd-example]: one for running ephemeral etcd containers and one for running etcd using basic persistent +volumes. If using the `etcd-pd` yaml manifest, we recommend a modification to use a `StorageClass` equivalent to your +cloud provider's fastest remote disk (such as `pd-ssd` on GCP). + +For production use cases, we recommend running etcd (in order of preference): + +1. External to your Kubernetes cluster to avoid circular dependencies. +2. Using the [etcd operator][etcd-operator]. + +[etcd]: https://etcd.io +[etcd-example]: https://github.com/m3db/m3db-operator/tree/master/example/etcd +[etcd-operator]: https://github.com/coreos/etcd-operator +[gke-regional]: https://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters diff --git a/site/content/operator/getting_started/update_cluster.md b/site/content/operator/getting_started/update_cluster.md new file mode 100644 index 0000000000..9a943bfc05 --- /dev/null +++ b/site/content/operator/getting_started/update_cluster.md @@ -0,0 +1,48 @@ +--- +title: "Updating a Cluster" +menuTitle: "Updating a Cluster" +weight: 13 +chapter: true +--- + +After your cluster has been running for some time you may decide you want to change the cluster's +spec. For instance, you may want to upgrade to a newer release of M3DB or modify the cluster's +config file. 
The operator can be used to safely rollout such changes so you don't need to do +anything other than add an annotation to enable updates. + +The first step in updating a cluster is to update the cluster's `M3DBCluster` CRD with the changes +you want to make. If you manage your cluster via manifests stored in YAML files then this is as +simple as updating the manifest and applying your changes: + +```bash +kubectl apply -f example/my-cluster.yaml +``` + +As a precaution, the operator won't immediately begin updating a cluster after your changes have +been applied. Instead, you'll need to add the following annotation on each `StatefulSet` in the +cluster to indicate to the operator that it is safe to update that `StatefulSet`: + +```bash +kubectl annotate statefulset my-cluster-rep0 operator.m3db.io/update=enabled +``` + +When the operator sees this annotation, it will check if the current state of the `StatefulSet` +differs from its desired state as defined by the `M3DBCluster` CRD. If so, the operator will +update the `StatefulSet` to match its desired state, thereby triggering a rollout of the pods in +the `StatefulSet`. The operator will also remove the `operator.m3db.io/update=enabled` annotation +from the updated `StatefulSet`. + +If, on the other hand, the operator finds the update annotation on a `StatefulSet` but it doesn't +need to be updated then the operator will remove the annotation but perform no other actions. +Consequently, once you set the update annotation on a `StatefulSet`, you can watch for the +annotation to be removed from it to know if the operator has seen and checked for an update. + +Since M3DB rollouts can take longer periods of time, it's often more convenient to set the +annotation to enable updates on each `StatefulSet` in the cluster at once, and allow the operator +to perform the rollout safely. 
The operator will update only one `StatefulSet` at a time and then +wait for it to bootstrap and become healthy again before moving onto the next `StatefulSet` in the +cluster so that no two replicas are ever down at the same time. + +```bash +kubectl annotate statefulset -l operator.m3db.io/cluster=my-cluster operator.m3db.io/update=enabled +``` diff --git a/site/content/operator/operator.md b/site/content/operator/operator.md new file mode 100644 index 0000000000..36f15d78a4 --- /dev/null +++ b/site/content/operator/operator.md @@ -0,0 +1,31 @@ +--- +title: "M3DB Operator" +menuTitle: "M3DB Operator" +weight: 1 +chapter: true +--- + +This documentation is for the M3DB [Kubernetes operator][operators] which can run and operate [M3DB][m3db] on Kubernetes. + +For more background on the M3DB operator, see our [KubeCon keynote][keynote] on its origins and usage at Uber. + +## Philosophy + +The M3DB operator aims to automate everyday tasks around managing M3DB. Specifically, it aims to automate: + +- Creating M3DB clusters +- Destroying M3DB clusters +- Expanding clusters (adding instances) +- Shrinking clusters (removing instances) +- Replacing failed instances + +It explicitly does not try to automate every single edge case a user may ever run into. For example, it does not aim to +automate disaster recovery if an entire cluster is taken down. Such use cases may still require human intervention, but +the operator will aim to not conflict with such operations a human may have to take on a cluster. 
+ +Generally speaking, the operator's philosophy is if **it would be unclear to a human what action to take, we will not +try to guess.** + +[operators]: https://coreos.com/operators/ +[m3db]: https://m3db.io/docs/ +[keynote]: https://kccna18.sched.com/event/Gsxn/keynote-smooth-operator-large-scale-automated-storage-with-kubernetes-celina-ward-software-engineer-matt-schallert-site-reliability-engineer-uber diff --git a/site/content/docs/overview/_index.md b/site/content/overview/_index.md similarity index 100% rename from site/content/docs/overview/_index.md rename to site/content/overview/_index.md diff --git a/site/content/docs/overview/components.md b/site/content/overview/components.md similarity index 100% rename from site/content/docs/overview/components.md rename to site/content/overview/components.md diff --git a/site/content/docs/overview/media.md b/site/content/overview/media.md similarity index 100% rename from site/content/docs/overview/media.md rename to site/content/overview/media.md diff --git a/site/content/docs/overview/motivation.md b/site/content/overview/motivation.md similarity index 100% rename from site/content/docs/overview/motivation.md rename to site/content/overview/motivation.md diff --git a/site/content/overview/roadmap.md b/site/content/overview/roadmap.md new file mode 100644 index 0000000000..6941e619d5 --- /dev/null +++ b/site/content/overview/roadmap.md @@ -0,0 +1,13 @@ +--- +title: "Roadmap" +weight: 4 +--- + +This roadmap is open for contributions and suggestions; it currently only defines near-term goals. 
+ +Near term: +- Add more diagrams of M3 deployment architecture by use case +- Add tutorials for a variety of use cases +- Improve operational guides for the aggregator +- Overview of design for M3DB reverse index +- Overview of design for M3 aggregator diff --git a/site/content/quickstart/_index.md b/site/content/quickstart/_index.md new file mode 100644 index 0000000000..1bf4f0ac01 --- /dev/null +++ b/site/content/quickstart/_index.md @@ -0,0 +1,4 @@ +--- +title: "Quickstart" +weight: 1 +--- diff --git a/site/content/docs/quickstart/_index.md b/site/content/quickstart/docker.md similarity index 96% rename from site/content/docs/quickstart/_index.md rename to site/content/quickstart/docker.md index c65b29682e..104f57995b 100644 --- a/site/content/docs/quickstart/_index.md +++ b/site/content/quickstart/docker.md @@ -1,11 +1,11 @@ --- -linktitle: "Quickstart using Docker" +title: "Docker" weight: 1 --- -# Creating a Single Node M3DB Cluster with Docker +## Creating a Single Node M3DB Cluster with Docker This guide shows how to install and configure M3DB, create a single-node cluster, and read and write metrics to it. @@ -75,14 +75,14 @@ You can find more information on configuring M3DB in the [operational guides sec A time series database (TSDBs) typically consist of one node (or instance) to store metrics data. This setup is simple to use but has issues with scalability over time as the quantity of metrics data written and read increases. -As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into {{< glossary_tooltip text="shards" term_id="shard" >}}. +As a distributed TSDB, M3DB helps solve this problem by spreading metrics data, and demand for that data, across multiple nodes in a cluster. 
M3DB does this by splitting data into segments that match certain criteria (such as above a certain value) across nodes into shards. If you've worked with a distributed database before, then these concepts are probably familiar to you, but M3DB uses different terminology to represent some concepts. -- Every cluster has **one** {{< glossary_tooltip text="placement" term_id="placement" >}} that maps shards to nodes in the cluster. -- A cluster can have **0 or more** {{< glossary_tooltip text="namespaces" term_id="namespace" >}} that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. +- Every cluster has **one** placement that maps shards to nodes in the cluster. +- A cluster can have **0 or more** namespaces that are similar conceptually to tables in other databases, and each node serves every namespace for the shards it owns. @@ -94,6 +94,7 @@ This quickstart uses the _{{% apiendpoint %}}database/create_ endpoint that crea You can create [placements](/docs/operational_guide/placement_configuration/) and [namespaces](/docs/operational_guide/namespace_configuration/#advanced-hard-way) separately if you need more control over their settings. +The `namespaceName` argument must match the namespace in the `local` section of the `M3Coordinator` YAML configuration. If you [add any namespaces](/docs/operational_guide/namespace_configuration) you also need to add them to the `local` section of `M3Coordinator`'s YAML config. In another terminal, use the following command. diff --git a/site/content/quickstart/kubernetes.md b/site/content/quickstart/kubernetes.md new file mode 100644 index 0000000000..812995fe56 --- /dev/null +++ b/site/content/quickstart/kubernetes.md @@ -0,0 +1,10 @@ +--- +title: "Kubernetes" +weight: 2 +--- + +## Create an M3DB Cluster on Kubernetes + +1. Meet the M3DB Kubernetes operator [requirements guide](/docs/operator/getting_started/requirements). +2. 
Follow the M3DB Kubernetes operator [installation guide](/docs/operator/getting_started/installation). +3. Read the M3DB Kubernetes operator [configuration guide](/docs/operator/configuration/configuring_m3db) and configure [namespaces](/docs/operator/configuration/namespaces). diff --git a/site/content/docs/troubleshooting/_index.md b/site/content/troubleshooting/_index.md similarity index 100% rename from site/content/docs/troubleshooting/_index.md rename to site/content/troubleshooting/_index.md diff --git a/site/go.mod b/site/go.mod index 92399634da..9158b62a16 100644 --- a/site/go.mod +++ b/site/go.mod @@ -2,4 +2,7 @@ module m3-site go 1.15 -require github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 // indirect +require ( + github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 // indirect + github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5 // indirect +) diff --git a/site/go.sum b/site/go.sum index 5ccc24cca7..b595e36b4c 100644 --- a/site/go.sum +++ b/site/go.sum @@ -3,3 +3,9 @@ github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467 h1:YtTpg github.com/chronosphereio/docs-theme v0.0.0-20201009164131-d9219ac30467/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36 h1:Wz/dFFd3bVR+XZ7shqLyuZwyIh5yDbhIhdbdkFEFnH4= github.com/chronosphereio/docs-theme v0.0.0-20201022162748-0ed11ce73f36/go.mod h1:vmH57xlaJmtH7jjovyuwXKe+2316CnpaFShoEAG72gQ= +github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890 h1:yO288wpyv4dr3nXdXjIsEM60DmeLzC4XquvnKCvoLR0= +github.com/chronosphereio/victor v0.0.0-20201116094105-f1b13fb86890/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= +github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57 h1:EXZaeDfAkZsOYoP3zCyZlhb+PXZO/PQSmilpTX8bX+0= +github.com/chronosphereio/victor v0.0.0-20201116123616-0454e7256e57/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= 
+github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5 h1:/eksfMA9uddkIKZ5A6zcpVHjASfV6sVuNDXHSAgMtx0= +github.com/chronosphereio/victor v0.0.0-20201116125303-247fa0ea9ed5/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM= diff --git a/site/layouts/partials/logo.html b/site/layouts/partials/logo.html index ff367ee734..09dc437131 100644 --- a/site/layouts/partials/logo.html +++ b/site/layouts/partials/logo.html @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/site/layouts/partials/menu.html b/site/layouts/partials/menu.html new file mode 100644 index 0000000000..0d445aca76 --- /dev/null +++ b/site/layouts/partials/menu.html @@ -0,0 +1,76 @@ +{{- $currentNode := . }} +{{- $showvisitedlinks := .Site.Params.showVisitedLinks -}} +{{- $topSection := index .Site.Sections 0 }} +{{- range $topSection.Sections }} +{{- template "section-tree-nav" dict "sect" . "currentnode" $currentNode "showvisitedlinks" $showvisitedlinks}} +{{- end}} + + +{{- define "section-tree-nav" }} +{{- $showvisitedlinks := .showvisitedlinks }} +{{- $currentNode := .currentnode }} + {{- with .sect}} + {{- if and .IsSection (or (not .Params.hidden) $.showhidden)}} + {{- $numberOfPages := (add (len .Pages) (len .Sections)) }} + {{- safeHTML .Params.head}} + + {{- else}} + {{- if not .Params.Hidden }} + + {{- end}} + {{- end}} + {{- end}} +{{- end}} diff --git a/site/static/about/index.html b/site/static/about/index.html index 30417da50f..ce6d8255f8 100644 --- a/site/static/about/index.html +++ b/site/static/about/index.html @@ -201,7 +201,7 @@

Getting Started @@ -472,7 +472,7 @@

target="_self" rel="noopener" target="_blank" - href="/docs/quickstart" + href="/docs/quickstart/docker" class="crunch-button crunch-button__full-background crunch-button__full-background--gradient-color crunch-button__full-background--arrow-right crunch-button__full-background--large text-white"> Getting Started diff --git a/site/static/index.html b/site/static/index.html index 995f69ea62..f2094d82c9 100644 --- a/site/static/index.html +++ b/site/static/index.html @@ -131,7 +131,7 @@

Open Source Metrics Engine

M3 is a Prometheus compatible, easy to adopt metrics engine that provides visibility for some of the world’s largest brands.
- Getting Started @@ -886,7 +886,7 @@